shmem.c revision 91828a405ae454a9503c41a7744f6ff877a80714
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 *		 2000 Transmeta Corp.
6 *		 2000-2001 Christoph Rohland
7 *		 2000-2001 SAP AG
8 *		 2002 Red Hat Inc.
9 * Copyright (C) 2002-2005 Hugh Dickins.
10 * Copyright (C) 2002-2005 VERITAS Software Corporation.
11 * Copyright (C) 2004 Andi Kleen, SuSE Labs
12 *
13 * Extended attribute support for tmpfs:
14 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
15 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
16 *
17 * This file is released under the GPL.
18 */
19
20/*
21 * This virtual memory filesystem is heavily based on the ramfs. It
22 * extends ramfs by the ability to use swap and honor resource limits
23 * which makes it a completely usable filesystem.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/fs.h>
29#include <linux/xattr.h>
30#include <linux/generic_acl.h>
31#include <linux/mm.h>
32#include <linux/mman.h>
33#include <linux/file.h>
34#include <linux/swap.h>
35#include <linux/pagemap.h>
36#include <linux/string.h>
37#include <linux/slab.h>
38#include <linux/backing-dev.h>
39#include <linux/shmem_fs.h>
40#include <linux/mount.h>
41#include <linux/writeback.h>
42#include <linux/vfs.h>
43#include <linux/blkdev.h>
44#include <linux/security.h>
45#include <linux/swapops.h>
46#include <linux/mempolicy.h>
47#include <linux/namei.h>
48#include <linux/ctype.h>
49#include <linux/migrate.h>
50#include <linux/highmem.h>
51
52#include <asm/uaccess.h>
53#include <asm/div64.h>
54#include <asm/pgtable.h>
55
56/* This magic number is used in glibc for posix shared memory */
57#define TMPFS_MAGIC	0x01021994
58
59#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
60#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
61#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
62
63#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
64#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
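/*
 * Worked example (illustrative): on a 32-bit machine with 4K
 * PAGE_CACHE_SIZE and 4-byte unsigned long, and assuming
 * SHMEM_NR_DIRECT == 16 (the value used in the swap vector layout
 * comment below; the real definition lives in shmem_fs.h),
 * ENTRIES_PER_PAGE = 1024 and ENTRIES_PER_PAGEPAGE = 1048576, so
 * SHMEM_MAX_INDEX = 16 + 524288 * 1025 = 537395216 pages, which puts
 * SHMEM_MAX_BYTES a little over 2TB per file.
 */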
65
66#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
67
68/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
69#define SHMEM_PAGEIN	 VM_READ
70#define SHMEM_TRUNCATE	 VM_WRITE
71
72/* Definition to limit shmem_truncate's steps between cond_rescheds */
73#define LATENCY_LIMIT	 64
74
75/* Pretend that each entry is of this size in directory's i_size */
76#define BOGO_DIRENT_SIZE 20
77
78/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
79enum sgp_type {
80	SGP_QUICK,	/* don't try more than file page cache lookup */
81	SGP_READ,	/* don't exceed i_size, don't allocate page */
82	SGP_CACHE,	/* don't exceed i_size, may allocate page */
83	SGP_WRITE,	/* may exceed i_size, may allocate page */
84};
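/*
 * How these modes are used below: shmem_nopage faults with SGP_CACHE;
 * shmem_populate passes SGP_QUICK when nonblocking, else SGP_CACHE;
 * reads and the partial-page hold in shmem_notify_change use SGP_READ;
 * shmem_file_write, shmem_prepare_write and shmem_symlink use SGP_WRITE.
 */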
85
86static int shmem_getpage(struct inode *inode, unsigned long idx,
87			 struct page **pagep, enum sgp_type sgp, int *type);
88
89static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
90{
91	/*
92	 * The above definition of ENTRIES_PER_PAGE, and the use of
93	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
94	 * might be reconsidered if it ever diverges from PAGE_SIZE.
95	 */
96	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
97}
98
99static inline void shmem_dir_free(struct page *page)
100{
101	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
102}
103
104static struct page **shmem_dir_map(struct page *page)
105{
106	return (struct page **)kmap_atomic(page, KM_USER0);
107}
108
109static inline void shmem_dir_unmap(struct page **dir)
110{
111	kunmap_atomic(dir, KM_USER0);
112}
113
114static swp_entry_t *shmem_swp_map(struct page *page)
115{
116	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
117}
118
119static inline void shmem_swp_balance_unmap(void)
120{
121	/*
122	 * When passing a pointer to an i_direct entry, to code which
123	 * also handles indirect entries and so will shmem_swp_unmap,
124	 * we must arrange for the preempt count to remain in balance.
125	 * What kmap_atomic of a lowmem page does depends on config
126	 * and architecture, so pretend to kmap_atomic some lowmem page.
127	 */
128	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
129}
130
131static inline void shmem_swp_unmap(swp_entry_t *entry)
132{
133	kunmap_atomic(entry, KM_USER1);
134}
135
136static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
137{
138	return sb->s_fs_info;
139}
140
141/*
142 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
143 * for shared memory and for shared anonymous (/dev/zero) mappings
144 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
145 * consistent with the pre-accounting of private mappings ...
146 */
147static inline int shmem_acct_size(unsigned long flags, loff_t size)
148{
149	return (flags & VM_ACCOUNT)?
150		security_vm_enough_memory(VM_ACCT(size)): 0;
151}
152
153static inline void shmem_unacct_size(unsigned long flags, loff_t size)
154{
155	if (flags & VM_ACCOUNT)
156		vm_unacct_memory(VM_ACCT(size));
157}
158
159/*
160 * ... whereas tmpfs objects are accounted incrementally as
161 * pages are allocated, in order to allow huge sparse files.
162 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
163 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
164 */
165static inline int shmem_acct_block(unsigned long flags)
166{
167	return (flags & VM_ACCOUNT)?
168		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
169}
170
171static inline void shmem_unacct_blocks(unsigned long flags, long pages)
172{
173	if (!(flags & VM_ACCOUNT))
174		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
175}
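/*
 * Example of the split (illustrative figures): a 10MB shared memory
 * object set up with VM_ACCOUNT is charged VM_ACCT(10MB) = 2560 pages
 * (with 4K pages) up front by shmem_acct_size; whereas a sparse 1GB
 * tmpfs file with a single page instantiated is charged for just that
 * one page, by shmem_acct_block at allocation time.
 */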
176
177static struct super_operations shmem_ops;
178static const struct address_space_operations shmem_aops;
179static struct file_operations shmem_file_operations;
180static struct inode_operations shmem_inode_operations;
181static struct inode_operations shmem_dir_inode_operations;
182static struct inode_operations shmem_special_inode_operations;
183static struct vm_operations_struct shmem_vm_ops;
184
185static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
186	.ra_pages	= 0,	/* No readahead */
187	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
188	.unplug_io_fn	= default_unplug_io_fn,
189};
190
191static LIST_HEAD(shmem_swaplist);
192static DEFINE_SPINLOCK(shmem_swaplist_lock);
193
194static void shmem_free_blocks(struct inode *inode, long pages)
195{
196	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
197	if (sbinfo->max_blocks) {
198		spin_lock(&sbinfo->stat_lock);
199		sbinfo->free_blocks += pages;
200		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
201		spin_unlock(&sbinfo->stat_lock);
202	}
203}
204
205/*
206 * shmem_recalc_inode - recalculate the size of an inode
207 *
208 * @inode: inode to recalc
209 *
210 * We have to calculate the free blocks since the mm can drop
211 * undirtied hole pages behind our back.
212 *
213 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
214 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
215 *
216 * It has to be called with the spinlock held.
217 */
218static void shmem_recalc_inode(struct inode *inode)
219{
220	struct shmem_inode_info *info = SHMEM_I(inode);
221	long freed;
222
223	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
224	if (freed > 0) {
225		info->alloced -= freed;
226		shmem_unacct_blocks(info->flags, freed);
227		shmem_free_blocks(inode, freed);
228	}
229}
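/*
 * For instance (illustrative numbers): with info->alloced == 8,
 * info->swapped == 1 and nrpages == 5, the mm must have dropped two
 * undirtied hole pages behind our back, so freed == 2 and those two
 * blocks are unaccounted and returned to the filesystem's free count.
 */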
230
231/*
232 * shmem_swp_entry - find the swap vector position in the info structure
233 *
234 * @info:  info structure for the inode
235 * @index: index of the page to find
236 * @page:  optional page to add to the structure. Has to be preset to
237 *         all zeros
238 *
239 * If no space has been allocated yet, it will return NULL when
240 * page is NULL; otherwise it will use the page for the needed block,
241 * setting it to NULL on return to indicate that it has been used.
242 *
243 * The swap vector is organized the following way:
244 *
245 * There are SHMEM_NR_DIRECT entries directly stored in the
246 * shmem_inode_info structure. So small files do not need an additional
247 * allocation.
248 *
249 * For pages with index > SHMEM_NR_DIRECT there is the pointer
250 * i_indirect which points to a page which holds in the first half
251 * doubly indirect blocks, in the second half triple indirect blocks:
252 *
253 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
254 * following layout (for SHMEM_NR_DIRECT == 16):
255 *
256 * i_indirect -> dir --> 16-19
257 * 	      |	     +-> 20-23
258 * 	      |
259 * 	      +-->dir2 --> 24-27
260 * 	      |	       +-> 28-31
261 * 	      |	       +-> 32-35
262 * 	      |	       +-> 36-39
263 * 	      |
264 * 	      +-->dir3 --> 40-43
265 * 	       	       +-> 44-47
266 * 	      	       +-> 48-51
267 * 	      	       +-> 52-55
268 */
269static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
270{
271	unsigned long offset;
272	struct page **dir;
273	struct page *subdir;
274
275	if (index < SHMEM_NR_DIRECT) {
276		shmem_swp_balance_unmap();
277		return info->i_direct+index;
278	}
279	if (!info->i_indirect) {
280		if (page) {
281			info->i_indirect = *page;
282			*page = NULL;
283		}
284		return NULL;			/* need another page */
285	}
286
287	index -= SHMEM_NR_DIRECT;
288	offset = index % ENTRIES_PER_PAGE;
289	index /= ENTRIES_PER_PAGE;
290	dir = shmem_dir_map(info->i_indirect);
291
292	if (index >= ENTRIES_PER_PAGE/2) {
293		index -= ENTRIES_PER_PAGE/2;
294		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
295		index %= ENTRIES_PER_PAGE;
296		subdir = *dir;
297		if (!subdir) {
298			if (page) {
299				*dir = *page;
300				*page = NULL;
301			}
302			shmem_dir_unmap(dir);
303			return NULL;		/* need another page */
304		}
305		shmem_dir_unmap(dir);
306		dir = shmem_dir_map(subdir);
307	}
308
309	dir += index;
310	subdir = *dir;
311	if (!subdir) {
312		if (!page || !(subdir = *page)) {
313			shmem_dir_unmap(dir);
314			return NULL;		/* need a page */
315		}
316		*dir = subdir;
317		*page = NULL;
318	}
319	shmem_dir_unmap(dir);
320	return shmem_swp_map(subdir) + offset;
321}
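/*
 * Worked lookup, using the artificial ENTRIES_PER_PAGE = 4 and
 * SHMEM_NR_DIRECT = 16 of the layout comment above, for index 30:
 * index - 16 = 14 gives offset = 14 % 4 = 2 and index = 14 / 4 = 3;
 * since 3 >= ENTRIES_PER_PAGE/2 we are in the triple indirect half,
 * so index becomes 3 - 2 = 1 and dir points at i_indirect slot
 * 2 + 1/4 = 2 (dir2 in the diagram); dir2's slot 1 is the leaf page
 * covering 28-31, and entry 2 of that leaf is page 30's swap entry.
 */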
322
323static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
324{
325	long incdec = value? 1: -1;
326
327	entry->val = value;
328	info->swapped += incdec;
329	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
330		struct page *page = kmap_atomic_to_page(entry);
331		set_page_private(page, page_private(page) + incdec);
332	}
333}
334
335/*
336 * shmem_swp_alloc - get the position of the swap entry for the page.
337 *                   If it does not exist allocate the entry.
338 *
339 * @info:	info structure for the inode
340 * @index:	index of the page to find
341 * @sgp:	check and recheck i_size? skip allocation?
342 */
343static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
344{
345	struct inode *inode = &info->vfs_inode;
346	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
347	struct page *page = NULL;
348	swp_entry_t *entry;
349
350	if (sgp != SGP_WRITE &&
351	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
352		return ERR_PTR(-EINVAL);
353
354	while (!(entry = shmem_swp_entry(info, index, &page))) {
355		if (sgp == SGP_READ)
356			return shmem_swp_map(ZERO_PAGE(0));
357		/*
358		 * Test free_blocks against 1 not 0, since we have 1 data
359		 * page (and perhaps indirect index pages) yet to allocate:
360		 * a waste to allocate index if we cannot allocate data.
361		 */
362		if (sbinfo->max_blocks) {
363			spin_lock(&sbinfo->stat_lock);
364			if (sbinfo->free_blocks <= 1) {
365				spin_unlock(&sbinfo->stat_lock);
366				return ERR_PTR(-ENOSPC);
367			}
368			sbinfo->free_blocks--;
369			inode->i_blocks += BLOCKS_PER_PAGE;
370			spin_unlock(&sbinfo->stat_lock);
371		}
372
373		spin_unlock(&info->lock);
374		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
375		if (page)
376			set_page_private(page, 0);
377		spin_lock(&info->lock);
378
379		if (!page) {
380			shmem_free_blocks(inode, 1);
381			return ERR_PTR(-ENOMEM);
382		}
383		if (sgp != SGP_WRITE &&
384		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
385			entry = ERR_PTR(-EINVAL);
386			break;
387		}
388		if (info->next_index <= index)
389			info->next_index = index + 1;
390	}
391	if (page) {
392		/* another task gave its page, or truncated the file */
393		shmem_free_blocks(inode, 1);
394		shmem_dir_free(page);
395	}
396	if (info->next_index <= index && !IS_ERR(entry))
397		info->next_index = index + 1;
398	return entry;
399}
400
401/*
402 * shmem_free_swp - free some swap entries in a directory
403 *
404 * @dir:   pointer to the directory
405 * @edir:  pointer after last entry of the directory
406 */
407static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
408{
409	swp_entry_t *ptr;
410	int freed = 0;
411
412	for (ptr = dir; ptr < edir; ptr++) {
413		if (ptr->val) {
414			free_swap_and_cache(*ptr);
415			*ptr = (swp_entry_t){0};
416			freed++;
417		}
418	}
419	return freed;
420}
421
422static int shmem_map_and_free_swp(struct page *subdir,
423		int offset, int limit, struct page ***dir)
424{
425	swp_entry_t *ptr;
426	int freed = 0;
427
428	ptr = shmem_swp_map(subdir);
429	for (; offset < limit; offset += LATENCY_LIMIT) {
430		int size = limit - offset;
431		if (size > LATENCY_LIMIT)
432			size = LATENCY_LIMIT;
433		freed += shmem_free_swp(ptr+offset, ptr+offset+size);
434		if (need_resched()) {
435			shmem_swp_unmap(ptr);
436			if (*dir) {
437				shmem_dir_unmap(*dir);
438				*dir = NULL;
439			}
440			cond_resched();
441			ptr = shmem_swp_map(subdir);
442		}
443	}
444	shmem_swp_unmap(ptr);
445	return freed;
446}
447
448static void shmem_free_pages(struct list_head *next)
449{
450	struct page *page;
451	int freed = 0;
452
453	do {
454		page = container_of(next, struct page, lru);
455		next = next->next;
456		shmem_dir_free(page);
457		freed++;
458		if (freed >= LATENCY_LIMIT) {
459			cond_resched();
460			freed = 0;
461		}
462	} while (next);
463}
464
465static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
466{
467	struct shmem_inode_info *info = SHMEM_I(inode);
468	unsigned long idx;
469	unsigned long size;
470	unsigned long limit;
471	unsigned long stage;
472	unsigned long diroff;
473	struct page **dir;
474	struct page *topdir;
475	struct page *middir;
476	struct page *subdir;
477	swp_entry_t *ptr;
478	LIST_HEAD(pages_to_free);
479	long nr_pages_to_free = 0;
480	long nr_swaps_freed = 0;
481	int offset;
482	int freed;
483	int punch_hole = 0;
484
485	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
486	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
487	if (idx >= info->next_index)
488		return;
489
490	spin_lock(&info->lock);
491	info->flags |= SHMEM_TRUNCATE;
492	if (likely(end == (loff_t) -1)) {
493		limit = info->next_index;
494		info->next_index = idx;
495	} else {
496		limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
497		if (limit > info->next_index)
498			limit = info->next_index;
499		punch_hole = 1;
500	}
501
502	topdir = info->i_indirect;
503	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
504		info->i_indirect = NULL;
505		nr_pages_to_free++;
506		list_add(&topdir->lru, &pages_to_free);
507	}
508	spin_unlock(&info->lock);
509
510	if (info->swapped && idx < SHMEM_NR_DIRECT) {
511		ptr = info->i_direct;
512		size = limit;
513		if (size > SHMEM_NR_DIRECT)
514			size = SHMEM_NR_DIRECT;
515		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
516	}
517	if (!topdir)
518		goto done2;
519
520	BUG_ON(limit <= SHMEM_NR_DIRECT);
521	limit -= SHMEM_NR_DIRECT;
522	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
523	offset = idx % ENTRIES_PER_PAGE;
524	idx -= offset;
525
526	dir = shmem_dir_map(topdir);
527	stage = ENTRIES_PER_PAGEPAGE/2;
528	if (idx < ENTRIES_PER_PAGEPAGE/2) {
529		middir = topdir;
530		diroff = idx/ENTRIES_PER_PAGE;
531	} else {
532		dir += ENTRIES_PER_PAGE/2;
533		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
534		while (stage <= idx)
535			stage += ENTRIES_PER_PAGEPAGE;
536		middir = *dir;
537		if (*dir) {
538			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
539				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
540			if (!diroff && !offset) {
541				*dir = NULL;
542				nr_pages_to_free++;
543				list_add(&middir->lru, &pages_to_free);
544			}
545			shmem_dir_unmap(dir);
546			dir = shmem_dir_map(middir);
547		} else {
548			diroff = 0;
549			offset = 0;
550			idx = stage;
551		}
552	}
553
554	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
555		if (unlikely(idx == stage)) {
556			shmem_dir_unmap(dir);
557			dir = shmem_dir_map(topdir) +
558			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
559			while (!*dir) {
560				dir++;
561				idx += ENTRIES_PER_PAGEPAGE;
562				if (idx >= limit)
563					goto done1;
564			}
565			stage = idx + ENTRIES_PER_PAGEPAGE;
566			middir = *dir;
567			*dir = NULL;
568			nr_pages_to_free++;
569			list_add(&middir->lru, &pages_to_free);
570			shmem_dir_unmap(dir);
571			cond_resched();
572			dir = shmem_dir_map(middir);
573			diroff = 0;
574		}
575		subdir = dir[diroff];
576		if (subdir && page_private(subdir)) {
577			size = limit - idx;
578			if (size > ENTRIES_PER_PAGE)
579				size = ENTRIES_PER_PAGE;
580			freed = shmem_map_and_free_swp(subdir,
581						offset, size, &dir);
582			if (!dir)
583				dir = shmem_dir_map(middir);
584			nr_swaps_freed += freed;
585			if (offset)
586				spin_lock(&info->lock);
587			set_page_private(subdir, page_private(subdir) - freed);
588			if (offset)
589				spin_unlock(&info->lock);
590			if (!punch_hole)
591				BUG_ON(page_private(subdir) > offset);
592		}
593		if (offset)
594			offset = 0;
595		else if (subdir && !page_private(subdir)) {
596			dir[diroff] = NULL;
597			nr_pages_to_free++;
598			list_add(&subdir->lru, &pages_to_free);
599		}
600	}
601done1:
602	shmem_dir_unmap(dir);
603done2:
604	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
605		/*
606		 * Call truncate_inode_pages again: racing shmem_unuse_inode
607		 * may have swizzled a page in from swap since vmtruncate or
608		 * generic_delete_inode did it, before we lowered next_index.
609		 * Also, though shmem_getpage checks i_size before adding to
610		 * cache, no recheck after: so fix the narrow window there too.
611		 */
612		truncate_inode_pages_range(inode->i_mapping, start, end);
613	}
614
615	spin_lock(&info->lock);
616	info->flags &= ~SHMEM_TRUNCATE;
617	info->swapped -= nr_swaps_freed;
618	if (nr_pages_to_free)
619		shmem_free_blocks(inode, nr_pages_to_free);
620	shmem_recalc_inode(inode);
621	spin_unlock(&info->lock);
622
623	/*
624	 * Empty swap vector directory pages to be freed?
625	 */
626	if (!list_empty(&pages_to_free)) {
627		pages_to_free.prev->next = NULL;
628		shmem_free_pages(pages_to_free.next);
629	}
630}
631
632static void shmem_truncate(struct inode *inode)
633{
634	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
635}
636
637static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
638{
639	struct inode *inode = dentry->d_inode;
640	struct page *page = NULL;
641	int error;
642
643	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
644		if (attr->ia_size < inode->i_size) {
645			/*
646			 * If truncating down to a partial page, then
647			 * if that page is already allocated, hold it
648			 * in memory until the truncation is over, so
649			 * truncate_partial_page cannot miss it were
650			 * it assigned to swap.
651			 */
652			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
653				(void) shmem_getpage(inode,
654					attr->ia_size>>PAGE_CACHE_SHIFT,
655						&page, SGP_READ, NULL);
656			}
657			/*
658			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
659			 * detect if any pages might have been added to cache
660			 * after truncate_inode_pages.  But we needn't bother
661			 * if it's being fully truncated to zero-length: the
662			 * nrpages check is efficient enough in that case.
663			 */
664			if (attr->ia_size) {
665				struct shmem_inode_info *info = SHMEM_I(inode);
666				spin_lock(&info->lock);
667				info->flags &= ~SHMEM_PAGEIN;
668				spin_unlock(&info->lock);
669			}
670		}
671	}
672
673	error = inode_change_ok(inode, attr);
674	if (!error)
675		error = inode_setattr(inode, attr);
676#ifdef CONFIG_TMPFS_POSIX_ACL
677	if (!error && (attr->ia_valid & ATTR_MODE))
678		error = generic_acl_chmod(inode, &shmem_acl_ops);
679#endif
680	if (page)
681		page_cache_release(page);
682	return error;
683}
684
685static void shmem_delete_inode(struct inode *inode)
686{
687	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
688	struct shmem_inode_info *info = SHMEM_I(inode);
689
690	if (inode->i_op->truncate == shmem_truncate) {
691		truncate_inode_pages(inode->i_mapping, 0);
692		shmem_unacct_size(info->flags, inode->i_size);
693		inode->i_size = 0;
694		shmem_truncate(inode);
695		if (!list_empty(&info->swaplist)) {
696			spin_lock(&shmem_swaplist_lock);
697			list_del_init(&info->swaplist);
698			spin_unlock(&shmem_swaplist_lock);
699		}
700	}
701	BUG_ON(inode->i_blocks);
702	if (sbinfo->max_inodes) {
703		spin_lock(&sbinfo->stat_lock);
704		sbinfo->free_inodes++;
705		spin_unlock(&sbinfo->stat_lock);
706	}
707	clear_inode(inode);
708}
709
710static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
711{
712	swp_entry_t *ptr;
713
714	for (ptr = dir; ptr < edir; ptr++) {
715		if (ptr->val == entry.val)
716			return ptr - dir;
717	}
718	return -1;
719}
720
721static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
722{
723	struct inode *inode;
724	unsigned long idx;
725	unsigned long size;
726	unsigned long limit;
727	unsigned long stage;
728	struct page **dir;
729	struct page *subdir;
730	swp_entry_t *ptr;
731	int offset;
732
733	idx = 0;
734	ptr = info->i_direct;
735	spin_lock(&info->lock);
736	limit = info->next_index;
737	size = limit;
738	if (size > SHMEM_NR_DIRECT)
739		size = SHMEM_NR_DIRECT;
740	offset = shmem_find_swp(entry, ptr, ptr+size);
741	if (offset >= 0) {
742		shmem_swp_balance_unmap();
743		goto found;
744	}
745	if (!info->i_indirect)
746		goto lost2;
747
748	dir = shmem_dir_map(info->i_indirect);
749	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
750
751	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
752		if (unlikely(idx == stage)) {
753			shmem_dir_unmap(dir-1);
754			dir = shmem_dir_map(info->i_indirect) +
755			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
756			while (!*dir) {
757				dir++;
758				idx += ENTRIES_PER_PAGEPAGE;
759				if (idx >= limit)
760					goto lost1;
761			}
762			stage = idx + ENTRIES_PER_PAGEPAGE;
763			subdir = *dir;
764			shmem_dir_unmap(dir);
765			dir = shmem_dir_map(subdir);
766		}
767		subdir = *dir;
768		if (subdir && page_private(subdir)) {
769			ptr = shmem_swp_map(subdir);
770			size = limit - idx;
771			if (size > ENTRIES_PER_PAGE)
772				size = ENTRIES_PER_PAGE;
773			offset = shmem_find_swp(entry, ptr, ptr+size);
774			if (offset >= 0) {
775				shmem_dir_unmap(dir);
776				goto found;
777			}
778			shmem_swp_unmap(ptr);
779		}
780	}
781lost1:
782	shmem_dir_unmap(dir-1);
783lost2:
784	spin_unlock(&info->lock);
785	return 0;
786found:
787	idx += offset;
788	inode = &info->vfs_inode;
789	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
790		info->flags |= SHMEM_PAGEIN;
791		shmem_swp_set(info, ptr + offset, 0);
792	}
793	shmem_swp_unmap(ptr);
794	spin_unlock(&info->lock);
795	/*
796	 * Decrement swap count even when the entry is left behind:
797	 * try_to_unuse will skip over mms, then reincrement count.
798	 */
799	swap_free(entry);
800	return 1;
801}
802
803/*
804 * shmem_unuse() searches for a possibly swapped-out shmem page.
805 */
806int shmem_unuse(swp_entry_t entry, struct page *page)
807{
808	struct list_head *p, *next;
809	struct shmem_inode_info *info;
810	int found = 0;
811
812	spin_lock(&shmem_swaplist_lock);
813	list_for_each_safe(p, next, &shmem_swaplist) {
814		info = list_entry(p, struct shmem_inode_info, swaplist);
815		if (!info->swapped)
816			list_del_init(&info->swaplist);
817		else if (shmem_unuse_inode(info, entry, page)) {
818			/* move head to start search for next from here */
819			list_move_tail(&shmem_swaplist, &info->swaplist);
820			found = 1;
821			break;
822		}
823	}
824	spin_unlock(&shmem_swaplist_lock);
825	return found;
826}
827
828/*
829 * Move the page from the page cache to the swap cache.
830 */
831static int shmem_writepage(struct page *page, struct writeback_control *wbc)
832{
833	struct shmem_inode_info *info;
834	swp_entry_t *entry, swap;
835	struct address_space *mapping;
836	unsigned long index;
837	struct inode *inode;
838
839	BUG_ON(!PageLocked(page));
840	BUG_ON(page_mapped(page));
841
842	mapping = page->mapping;
843	index = page->index;
844	inode = mapping->host;
845	info = SHMEM_I(inode);
846	if (info->flags & VM_LOCKED)
847		goto redirty;
848	swap = get_swap_page();
849	if (!swap.val)
850		goto redirty;
851
852	spin_lock(&info->lock);
853	shmem_recalc_inode(inode);
854	if (index >= info->next_index) {
855		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
856		goto unlock;
857	}
858	entry = shmem_swp_entry(info, index, NULL);
859	BUG_ON(!entry);
860	BUG_ON(entry->val);
861
862	if (move_to_swap_cache(page, swap) == 0) {
863		shmem_swp_set(info, entry, swap.val);
864		shmem_swp_unmap(entry);
865		spin_unlock(&info->lock);
866		if (list_empty(&info->swaplist)) {
867			spin_lock(&shmem_swaplist_lock);
868			/* move instead of add in case we're racing */
869			list_move_tail(&info->swaplist, &shmem_swaplist);
870			spin_unlock(&shmem_swaplist_lock);
871		}
872		unlock_page(page);
873		return 0;
874	}
875
876	shmem_swp_unmap(entry);
877unlock:
878	spin_unlock(&info->lock);
879	swap_free(swap);
880redirty:
881	set_page_dirty(page);
882	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
883}
884
885#ifdef CONFIG_NUMA
886static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
887{
888	char *nodelist = strchr(value, ':');
889	int err = 1;
890
891	if (nodelist) {
892		/* NUL-terminate policy string */
893		*nodelist++ = '\0';
894		if (nodelist_parse(nodelist, *policy_nodes))
895			goto out;
896	}
897	if (!strcmp(value, "default")) {
898		*policy = MPOL_DEFAULT;
899		/* Don't allow a nodelist */
900		if (!nodelist)
901			err = 0;
902	} else if (!strcmp(value, "prefer")) {
903		*policy = MPOL_PREFERRED;
904		/* Insist on a nodelist of one node only */
905		if (nodelist) {
906			char *rest = nodelist;
907			while (isdigit(*rest))
908				rest++;
909			if (!*rest)
910				err = 0;
911		}
912	} else if (!strcmp(value, "bind")) {
913		*policy = MPOL_BIND;
914		/* Insist on a nodelist */
915		if (nodelist)
916			err = 0;
917	} else if (!strcmp(value, "interleave")) {
918		*policy = MPOL_INTERLEAVE;
919		/* Default to nodes online if no nodelist */
920		if (!nodelist)
921			*policy_nodes = node_online_map;
922		err = 0;
923	}
924out:
925	/* Restore string for error message */
926	if (nodelist)
927		*--nodelist = ':';
928	return err;
929}
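/*
 * Example mount strings (illustrative): "mpol=default" selects
 * MPOL_DEFAULT and rejects any nodelist; "mpol=prefer:2" selects
 * MPOL_PREFERRED on node 2 (the nodelist must be a single number);
 * "mpol=bind:0-3" selects MPOL_BIND over nodes 0-3; and
 * "mpol=interleave" selects MPOL_INTERLEAVE over all online nodes.
 */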
930
931static struct page *shmem_swapin_async(struct shared_policy *p,
932				       swp_entry_t entry, unsigned long idx)
933{
934	struct page *page;
935	struct vm_area_struct pvma;
936
937	/* Create a pseudo vma that just contains the policy */
938	memset(&pvma, 0, sizeof(struct vm_area_struct));
939	pvma.vm_end = PAGE_SIZE;
940	pvma.vm_pgoff = idx;
941	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
942	page = read_swap_cache_async(entry, &pvma, 0);
943	mpol_free(pvma.vm_policy);
944	return page;
945}
946
947struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
948			  unsigned long idx)
949{
950	struct shared_policy *p = &info->policy;
951	int i, num;
952	struct page *page;
953	unsigned long offset;
954
955	num = valid_swaphandles(entry, &offset);
956	for (i = 0; i < num; offset++, i++) {
957		page = shmem_swapin_async(p,
958				swp_entry(swp_type(entry), offset), idx);
959		if (!page)
960			break;
961		page_cache_release(page);
962	}
963	lru_add_drain();	/* Push any new pages onto the LRU now */
964	return shmem_swapin_async(p, entry, idx);
965}
966
967static struct page *
968shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
969		 unsigned long idx)
970{
971	struct vm_area_struct pvma;
972	struct page *page;
973
974	memset(&pvma, 0, sizeof(struct vm_area_struct));
975	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
976	pvma.vm_pgoff = idx;
977	pvma.vm_end = PAGE_SIZE;
978	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
979	mpol_free(pvma.vm_policy);
980	return page;
981}
982#else
983static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
984{
985	return 1;
986}
987
988static inline struct page *
989shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
990{
991	swapin_readahead(entry, 0, NULL);
992	return read_swap_cache_async(entry, NULL, 0);
993}
994
995static inline struct page *
996shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
997{
998	return alloc_page(gfp | __GFP_ZERO);
999}
1000#endif
1001
1002/*
1003 * shmem_getpage - either get the page from swap or allocate a new one
1004 *
1005 * If we allocate a new one we do not mark it dirty. That's up to the
1006 * vm. If we swap it in we mark it dirty, since we also free the swap
1007 * entry: a page cannot live in both the swap and page cache.
1008 */
1009static int shmem_getpage(struct inode *inode, unsigned long idx,
1010			struct page **pagep, enum sgp_type sgp, int *type)
1011{
1012	struct address_space *mapping = inode->i_mapping;
1013	struct shmem_inode_info *info = SHMEM_I(inode);
1014	struct shmem_sb_info *sbinfo;
1015	struct page *filepage = *pagep;
1016	struct page *swappage;
1017	swp_entry_t *entry;
1018	swp_entry_t swap;
1019	int error;
1020
1021	if (idx >= SHMEM_MAX_INDEX)
1022		return -EFBIG;
1023	/*
1024	 * Normally, filepage is NULL on entry, and either found
1025	 * uptodate immediately, or allocated and zeroed, or read
1026	 * in under swappage, which is then assigned to filepage.
1027	 * But shmem_prepare_write passes in a locked filepage,
1028	 * which may be found not uptodate by other callers too,
1029	 * and may need to be copied from the swappage read in.
1030	 */
1031repeat:
1032	if (!filepage)
1033		filepage = find_lock_page(mapping, idx);
1034	if (filepage && PageUptodate(filepage))
1035		goto done;
1036	error = 0;
1037	if (sgp == SGP_QUICK)
1038		goto failed;
1039
1040	spin_lock(&info->lock);
1041	shmem_recalc_inode(inode);
1042	entry = shmem_swp_alloc(info, idx, sgp);
1043	if (IS_ERR(entry)) {
1044		spin_unlock(&info->lock);
1045		error = PTR_ERR(entry);
1046		goto failed;
1047	}
1048	swap = *entry;
1049
1050	if (swap.val) {
1051		/* Look it up and read it in.. */
1052		swappage = lookup_swap_cache(swap);
1053		if (!swappage) {
1054			shmem_swp_unmap(entry);
1055			/* here we actually do the io */
1056			if (type && *type == VM_FAULT_MINOR) {
1057				__count_vm_event(PGMAJFAULT);
1058				*type = VM_FAULT_MAJOR;
1059			}
1060			spin_unlock(&info->lock);
1061			swappage = shmem_swapin(info, swap, idx);
1062			if (!swappage) {
1063				spin_lock(&info->lock);
1064				entry = shmem_swp_alloc(info, idx, sgp);
1065				if (IS_ERR(entry))
1066					error = PTR_ERR(entry);
1067				else {
1068					if (entry->val == swap.val)
1069						error = -ENOMEM;
1070					shmem_swp_unmap(entry);
1071				}
1072				spin_unlock(&info->lock);
1073				if (error)
1074					goto failed;
1075				goto repeat;
1076			}
1077			wait_on_page_locked(swappage);
1078			page_cache_release(swappage);
1079			goto repeat;
1080		}
1081
1082		/* We have to do this with page locked to prevent races */
1083		if (TestSetPageLocked(swappage)) {
1084			shmem_swp_unmap(entry);
1085			spin_unlock(&info->lock);
1086			wait_on_page_locked(swappage);
1087			page_cache_release(swappage);
1088			goto repeat;
1089		}
1090		if (PageWriteback(swappage)) {
1091			shmem_swp_unmap(entry);
1092			spin_unlock(&info->lock);
1093			wait_on_page_writeback(swappage);
1094			unlock_page(swappage);
1095			page_cache_release(swappage);
1096			goto repeat;
1097		}
1098		if (!PageUptodate(swappage)) {
1099			shmem_swp_unmap(entry);
1100			spin_unlock(&info->lock);
1101			unlock_page(swappage);
1102			page_cache_release(swappage);
1103			error = -EIO;
1104			goto failed;
1105		}
1106
1107		if (filepage) {
1108			shmem_swp_set(info, entry, 0);
1109			shmem_swp_unmap(entry);
1110			delete_from_swap_cache(swappage);
1111			spin_unlock(&info->lock);
1112			copy_highpage(filepage, swappage);
1113			unlock_page(swappage);
1114			page_cache_release(swappage);
1115			flush_dcache_page(filepage);
1116			SetPageUptodate(filepage);
1117			set_page_dirty(filepage);
1118			swap_free(swap);
1119		} else if (!(error = move_from_swap_cache(
1120				swappage, idx, mapping))) {
1121			info->flags |= SHMEM_PAGEIN;
1122			shmem_swp_set(info, entry, 0);
1123			shmem_swp_unmap(entry);
1124			spin_unlock(&info->lock);
1125			filepage = swappage;
1126			swap_free(swap);
1127		} else {
1128			shmem_swp_unmap(entry);
1129			spin_unlock(&info->lock);
1130			unlock_page(swappage);
1131			page_cache_release(swappage);
1132			if (error == -ENOMEM) {
1133				/* let kswapd refresh zone for GFP_ATOMICs */
1134				blk_congestion_wait(WRITE, HZ/50);
1135			}
1136			goto repeat;
1137		}
1138	} else if (sgp == SGP_READ && !filepage) {
1139		shmem_swp_unmap(entry);
1140		filepage = find_get_page(mapping, idx);
1141		if (filepage &&
1142		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
1143			spin_unlock(&info->lock);
1144			wait_on_page_locked(filepage);
1145			page_cache_release(filepage);
1146			filepage = NULL;
1147			goto repeat;
1148		}
1149		spin_unlock(&info->lock);
1150	} else {
1151		shmem_swp_unmap(entry);
1152		sbinfo = SHMEM_SB(inode->i_sb);
1153		if (sbinfo->max_blocks) {
1154			spin_lock(&sbinfo->stat_lock);
1155			if (sbinfo->free_blocks == 0 ||
1156			    shmem_acct_block(info->flags)) {
1157				spin_unlock(&sbinfo->stat_lock);
1158				spin_unlock(&info->lock);
1159				error = -ENOSPC;
1160				goto failed;
1161			}
1162			sbinfo->free_blocks--;
1163			inode->i_blocks += BLOCKS_PER_PAGE;
1164			spin_unlock(&sbinfo->stat_lock);
1165		} else if (shmem_acct_block(info->flags)) {
1166			spin_unlock(&info->lock);
1167			error = -ENOSPC;
1168			goto failed;
1169		}
1170
1171		if (!filepage) {
1172			spin_unlock(&info->lock);
1173			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
1174						    info,
1175						    idx);
1176			if (!filepage) {
1177				shmem_unacct_blocks(info->flags, 1);
1178				shmem_free_blocks(inode, 1);
1179				error = -ENOMEM;
1180				goto failed;
1181			}
1182
1183			spin_lock(&info->lock);
1184			entry = shmem_swp_alloc(info, idx, sgp);
1185			if (IS_ERR(entry))
1186				error = PTR_ERR(entry);
1187			else {
1188				swap = *entry;
1189				shmem_swp_unmap(entry);
1190			}
1191			if (error || swap.val || 0 != add_to_page_cache_lru(
1192					filepage, mapping, idx, GFP_ATOMIC)) {
1193				spin_unlock(&info->lock);
1194				page_cache_release(filepage);
1195				shmem_unacct_blocks(info->flags, 1);
1196				shmem_free_blocks(inode, 1);
1197				filepage = NULL;
1198				if (error)
1199					goto failed;
1200				goto repeat;
1201			}
1202			info->flags |= SHMEM_PAGEIN;
1203		}
1204
1205		info->alloced++;
1206		spin_unlock(&info->lock);
1207		flush_dcache_page(filepage);
1208		SetPageUptodate(filepage);
1209	}
1210done:
1211	if (*pagep != filepage) {
1212		unlock_page(filepage);
1213		*pagep = filepage;
1214	}
1215	return 0;
1216
1217failed:
1218	if (*pagep != filepage) {
1219		unlock_page(filepage);
1220		page_cache_release(filepage);
1221	}
1222	return error;
1223}
1224
1225struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
1226{
1227	struct inode *inode = vma->vm_file->f_dentry->d_inode;
1228	struct page *page = NULL;
1229	unsigned long idx;
1230	int error;
1231
1232	idx = (address - vma->vm_start) >> PAGE_SHIFT;
1233	idx += vma->vm_pgoff;
1234	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
1235	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1236		return NOPAGE_SIGBUS;
1237
1238	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
1239	if (error)
1240		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
1241
1242	mark_page_accessed(page);
1243	return page;
1244}
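/*
 * Index arithmetic example (assuming PAGE_CACHE_SHIFT == PAGE_SHIFT,
 * i.e. 4K page cache pages): a fault at vma->vm_start + 0x5000 in a
 * vma with vm_pgoff == 2 gives idx = 5 + 2 = 7, and the final shift
 * by PAGE_CACHE_SHIFT - PAGE_SHIFT = 0 leaves idx unchanged.
 */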
1245
1246static int shmem_populate(struct vm_area_struct *vma,
1247	unsigned long addr, unsigned long len,
1248	pgprot_t prot, unsigned long pgoff, int nonblock)
1249{
1250	struct inode *inode = vma->vm_file->f_dentry->d_inode;
1251	struct mm_struct *mm = vma->vm_mm;
1252	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
1253	unsigned long size;
1254
1255	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1256	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
1257		return -EINVAL;
1258
1259	while ((long) len > 0) {
1260		struct page *page = NULL;
1261		int err;
1262		/*
1263		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
1264		 */
1265		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
1266		if (err)
1267			return err;
1268		/* Page may still be null, but only if nonblock was set. */
1269		if (page) {
1270			mark_page_accessed(page);
1271			err = install_page(mm, vma, addr, page, prot);
1272			if (err) {
1273				page_cache_release(page);
1274				return err;
1275			}
1276		} else if (vma->vm_flags & VM_NONLINEAR) {
1277			/* No page was found just because we can't read it in
1278			 * now (being here implies nonblock != 0), but the page
1279			 * may exist, so set the PTE to fault it in later. */
1280			err = install_file_pte(mm, vma, addr, pgoff, prot);
1281			if (err)
1282				return err;
1283		}
1284
1285		len -= PAGE_SIZE;
1286		addr += PAGE_SIZE;
1287		pgoff++;
1288	}
1289	return 0;
1290}
1291
1292#ifdef CONFIG_NUMA
1293int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1294{
1295	struct inode *i = vma->vm_file->f_dentry->d_inode;
1296	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1297}
1298
1299struct mempolicy *
1300shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
1301{
1302	struct inode *i = vma->vm_file->f_dentry->d_inode;
1303	unsigned long idx;
1304
1305	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1306	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1307}
1308#endif
1309
1310int shmem_lock(struct file *file, int lock, struct user_struct *user)
1311{
1312	struct inode *inode = file->f_dentry->d_inode;
1313	struct shmem_inode_info *info = SHMEM_I(inode);
1314	int retval = -ENOMEM;
1315
1316	spin_lock(&info->lock);
1317	if (lock && !(info->flags & VM_LOCKED)) {
1318		if (!user_shm_lock(inode->i_size, user))
1319			goto out_nomem;
1320		info->flags |= VM_LOCKED;
1321	}
1322	if (!lock && (info->flags & VM_LOCKED) && user) {
1323		user_shm_unlock(inode->i_size, user);
1324		info->flags &= ~VM_LOCKED;
1325	}
1326	retval = 0;
1327out_nomem:
1328	spin_unlock(&info->lock);
1329	return retval;
1330}
1331
1332int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1333{
1334	file_accessed(file);
1335	vma->vm_ops = &shmem_vm_ops;
1336	return 0;
1337}
1338
1339static struct inode *
1340shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1341{
1342	struct inode *inode;
1343	struct shmem_inode_info *info;
1344	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1345
1346	if (sbinfo->max_inodes) {
1347		spin_lock(&sbinfo->stat_lock);
1348		if (!sbinfo->free_inodes) {
1349			spin_unlock(&sbinfo->stat_lock);
1350			return NULL;
1351		}
1352		sbinfo->free_inodes--;
1353		spin_unlock(&sbinfo->stat_lock);
1354	}
1355
1356	inode = new_inode(sb);
1357	if (inode) {
1358		inode->i_mode = mode;
1359		inode->i_uid = current->fsuid;
1360		inode->i_gid = current->fsgid;
1361		inode->i_blocks = 0;
1362		inode->i_mapping->a_ops = &shmem_aops;
1363		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1364		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1365		inode->i_generation = get_seconds();
1366		info = SHMEM_I(inode);
1367		memset(info, 0, (char *)inode - (char *)info);
1368		spin_lock_init(&info->lock);
1369		INIT_LIST_HEAD(&info->swaplist);
1370
1371		switch (mode & S_IFMT) {
1372		default:
1373			inode->i_op = &shmem_special_inode_operations;
1374			init_special_inode(inode, mode, dev);
1375			break;
1376		case S_IFREG:
1377			inode->i_op = &shmem_inode_operations;
1378			inode->i_fop = &shmem_file_operations;
1379			mpol_shared_policy_init(&info->policy, sbinfo->policy,
1380							&sbinfo->policy_nodes);
1381			break;
1382		case S_IFDIR:
1383			inc_nlink(inode);
1384			/* Some things misbehave if size == 0 on a directory */
1385			inode->i_size = 2 * BOGO_DIRENT_SIZE;
1386			inode->i_op = &shmem_dir_inode_operations;
1387			inode->i_fop = &simple_dir_operations;
1388			break;
1389		case S_IFLNK:
1390			/*
1391			 * Must not load anything in the rbtree,
1392			 * mpol_free_shared_policy will not be called.
1393			 */
1394			mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
1395						NULL);
1396			break;
1397		}
1398	} else if (sbinfo->max_inodes) {
1399		spin_lock(&sbinfo->stat_lock);
1400		sbinfo->free_inodes++;
1401		spin_unlock(&sbinfo->stat_lock);
1402	}
1403	return inode;
1404}
1405
1406#ifdef CONFIG_TMPFS
1407static struct inode_operations shmem_symlink_inode_operations;
1408static struct inode_operations shmem_symlink_inline_operations;
1409
1410/*
1411 * Normally tmpfs makes no use of shmem_prepare_write, but it
1412 * lets a tmpfs file be used read-write below the loop driver.
1413 */
1414static int
1415shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1416{
1417	struct inode *inode = page->mapping->host;
1418	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1419}
1420
1421static ssize_t
1422shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1423{
1424	struct inode	*inode = file->f_dentry->d_inode;
1425	loff_t		pos;
1426	unsigned long	written;
1427	ssize_t		err;
1428
1429	if ((ssize_t) count < 0)
1430		return -EINVAL;
1431
1432	if (!access_ok(VERIFY_READ, buf, count))
1433		return -EFAULT;
1434
1435	mutex_lock(&inode->i_mutex);
1436
1437	pos = *ppos;
1438	written = 0;
1439
1440	err = generic_write_checks(file, &pos, &count, 0);
1441	if (err || !count)
1442		goto out;
1443
1444	err = remove_suid(file->f_dentry);
1445	if (err)
1446		goto out;
1447
1448	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1449
1450	do {
1451		struct page *page = NULL;
1452		unsigned long bytes, index, offset;
1453		char *kaddr;
1454		int left;
1455
1456		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
1457		index = pos >> PAGE_CACHE_SHIFT;
1458		bytes = PAGE_CACHE_SIZE - offset;
1459		if (bytes > count)
1460			bytes = count;
1461
1462		/*
1463		 * We don't hold page lock across copy from user -
1464		 * what would it guard against? - so no deadlock here.
1465		 * But it still may be a good idea to prefault below.
1466		 */
1467
1468		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1469		if (err)
1470			break;
1471
1472		left = bytes;
1473		if (PageHighMem(page)) {
1474			volatile unsigned char dummy;
1475			__get_user(dummy, buf);
1476			__get_user(dummy, buf + bytes - 1);
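			/*
			 * The two __get_user calls above touch the first and
			 * last byte of the user buffer, faulting it in, so
			 * the atomic (non-faulting) copy below is likely to
			 * succeed in one pass.
			 */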
1477
1478			kaddr = kmap_atomic(page, KM_USER0);
1479			left = __copy_from_user_inatomic(kaddr + offset,
1480							buf, bytes);
1481			kunmap_atomic(kaddr, KM_USER0);
1482		}
1483		if (left) {
1484			kaddr = kmap(page);
1485			left = __copy_from_user(kaddr + offset, buf, bytes);
1486			kunmap(page);
1487		}
1488
1489		written += bytes;
1490		count -= bytes;
1491		pos += bytes;
1492		buf += bytes;
1493		if (pos > inode->i_size)
1494			i_size_write(inode, pos);
1495
1496		flush_dcache_page(page);
1497		set_page_dirty(page);
1498		mark_page_accessed(page);
1499		page_cache_release(page);
1500
1501		if (left) {
1502			pos -= left;
1503			written -= left;
1504			err = -EFAULT;
1505			break;
1506		}
1507
1508		/*
1509		 * Our dirty pages are not counted in nr_dirty,
1510		 * and we do not attempt to balance dirty pages.
1511		 */
1512
1513		cond_resched();
1514	} while (count);
1515
1516	*ppos = pos;
1517	if (written)
1518		err = written;
1519out:
1520	mutex_unlock(&inode->i_mutex);
1521	return err;
1522}
1523
1524static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1525{
1526	struct inode *inode = filp->f_dentry->d_inode;
1527	struct address_space *mapping = inode->i_mapping;
1528	unsigned long index, offset;
1529
1530	index = *ppos >> PAGE_CACHE_SHIFT;
1531	offset = *ppos & ~PAGE_CACHE_MASK;
1532
1533	for (;;) {
1534		struct page *page = NULL;
1535		unsigned long end_index, nr, ret;
1536		loff_t i_size = i_size_read(inode);
1537
1538		end_index = i_size >> PAGE_CACHE_SHIFT;
1539		if (index > end_index)
1540			break;
1541		if (index == end_index) {
1542			nr = i_size & ~PAGE_CACHE_MASK;
1543			if (nr <= offset)
1544				break;
1545		}
1546
1547		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1548		if (desc->error) {
1549			if (desc->error == -EINVAL)
1550				desc->error = 0;
1551			break;
1552		}
1553
1554		/*
1555		 * We must re-check i_size after getting the page, since reads
1556		 * (unlike writes) are called without i_mutex protection against truncate
1557		 */
1558		nr = PAGE_CACHE_SIZE;
1559		i_size = i_size_read(inode);
1560		end_index = i_size >> PAGE_CACHE_SHIFT;
1561		if (index == end_index) {
1562			nr = i_size & ~PAGE_CACHE_MASK;
1563			if (nr <= offset) {
1564				if (page)
1565					page_cache_release(page);
1566				break;
1567			}
1568		}
1569		nr -= offset;
1570
1571		if (page) {
1572			/*
1573			 * If users can be writing to this page using arbitrary
1574			 * virtual addresses, take care about potential aliasing
1575			 * before reading the page on the kernel side.
1576			 */
1577			if (mapping_writably_mapped(mapping))
1578				flush_dcache_page(page);
1579			/*
1580			 * Mark the page accessed if we read the beginning.
1581			 */
1582			if (!offset)
1583				mark_page_accessed(page);
1584		} else {
1585			page = ZERO_PAGE(0);
1586			page_cache_get(page);
1587		}
1588
1589		/*
1590		 * Ok, we have the page, and it's up-to-date, so
1591		 * now we can copy it to user space...
1592		 *
1593		 * The actor routine returns how many bytes were actually used.
1594		 * NOTE! This may not be the same as how much of a user buffer
1595		 * we filled up (we may be padding etc), so we can only update
1596		 * "pos" here (the actor routine has to update the user buffer
1597		 * pointers and the remaining count).
1598		 */
1599		ret = actor(desc, page, offset, nr);
1600		offset += ret;
1601		index += offset >> PAGE_CACHE_SHIFT;
1602		offset &= ~PAGE_CACHE_MASK;
1603
1604		page_cache_release(page);
1605		if (ret != nr || !desc->count)
1606			break;
1607
1608		cond_resched();
1609	}
1610
1611	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1612	file_accessed(filp);
1613}
1614
1615static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1616{
1617	read_descriptor_t desc;
1618
1619	if ((ssize_t) count < 0)
1620		return -EINVAL;
1621	if (!access_ok(VERIFY_WRITE, buf, count))
1622		return -EFAULT;
1623	if (!count)
1624		return 0;
1625
1626	desc.written = 0;
1627	desc.count = count;
1628	desc.arg.buf = buf;
1629	desc.error = 0;
1630
1631	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1632	if (desc.written)
1633		return desc.written;
1634	return desc.error;
1635}
1636
1637static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
1638			 size_t count, read_actor_t actor, void *target)
1639{
1640	read_descriptor_t desc;
1641
1642	if (!count)
1643		return 0;
1644
1645	desc.written = 0;
1646	desc.count = count;
1647	desc.arg.data = target;
1648	desc.error = 0;
1649
1650	do_shmem_file_read(in_file, ppos, &desc, actor);
1651	if (desc.written)
1652		return desc.written;
1653	return desc.error;
1654}
1655
1656static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1657{
1658	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1659
1660	buf->f_type = TMPFS_MAGIC;
1661	buf->f_bsize = PAGE_CACHE_SIZE;
1662	buf->f_namelen = NAME_MAX;
1663	spin_lock(&sbinfo->stat_lock);
1664	if (sbinfo->max_blocks) {
1665		buf->f_blocks = sbinfo->max_blocks;
1666		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1667	}
1668	if (sbinfo->max_inodes) {
1669		buf->f_files = sbinfo->max_inodes;
1670		buf->f_ffree = sbinfo->free_inodes;
1671	}
1672	/* else leave those fields 0 like simple_statfs */
1673	spin_unlock(&sbinfo->stat_lock);
1674	return 0;
1675}
1676
1677/*
1678 * File creation. Allocate an inode, and we're done..
1679 */
1680static int
1681shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1682{
1683	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1684	int error = -ENOSPC;
1685
1686	if (inode) {
1687		error = security_inode_init_security(inode, dir, NULL, NULL,
1688						     NULL);
1689		if (error) {
1690			if (error != -EOPNOTSUPP) {
1691				iput(inode);
1692				return error;
1693			}
1694		}
1695		error = shmem_acl_init(inode, dir);
1696		if (error) {
1697			iput(inode);
1698			return error;
1699		}
1700		if (dir->i_mode & S_ISGID) {
1701			inode->i_gid = dir->i_gid;
1702			if (S_ISDIR(mode))
1703				inode->i_mode |= S_ISGID;
1704		}
1705		dir->i_size += BOGO_DIRENT_SIZE;
1706		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1707		d_instantiate(dentry, inode);
1708		dget(dentry); /* Extra count - pin the dentry in core */
1709	}
1710	return error;
1711}
1712
1713static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1714{
1715	int error;
1716
1717	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1718		return error;
1719	inc_nlink(dir);
1720	return 0;
1721}
1722
1723static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1724		struct nameidata *nd)
1725{
1726	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1727}
1728
1729/*
1730 * Link a file..
1731 */
1732static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1733{
1734	struct inode *inode = old_dentry->d_inode;
1735	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1736
1737	/*
1738	 * No ordinary (disk based) filesystem counts links as inodes;
1739	 * but each new link needs a new dentry, pinning lowmem, and
1740	 * tmpfs dentries cannot be pruned until they are unlinked.
1741	 */
1742	if (sbinfo->max_inodes) {
1743		spin_lock(&sbinfo->stat_lock);
1744		if (!sbinfo->free_inodes) {
1745			spin_unlock(&sbinfo->stat_lock);
1746			return -ENOSPC;
1747		}
1748		sbinfo->free_inodes--;
1749		spin_unlock(&sbinfo->stat_lock);
1750	}
1751
1752	dir->i_size += BOGO_DIRENT_SIZE;
1753	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1754	inc_nlink(inode);
1755	atomic_inc(&inode->i_count);	/* New dentry reference */
1756	dget(dentry);		/* Extra pinning count for the created dentry */
1757	d_instantiate(dentry, inode);
1758	return 0;
1759}
1760
1761static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1762{
1763	struct inode *inode = dentry->d_inode;
1764
1765	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1766		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1767		if (sbinfo->max_inodes) {
1768			spin_lock(&sbinfo->stat_lock);
1769			sbinfo->free_inodes++;
1770			spin_unlock(&sbinfo->stat_lock);
1771		}
1772	}
1773
1774	dir->i_size -= BOGO_DIRENT_SIZE;
1775	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1776	drop_nlink(inode);
1777	dput(dentry);	/* Undo the count from "create" - this does all the work */
1778	return 0;
1779}
1780
1781static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1782{
1783	if (!simple_empty(dentry))
1784		return -ENOTEMPTY;
1785
1786	drop_nlink(dentry->d_inode);
1787	drop_nlink(dir);
1788	return shmem_unlink(dir, dentry);
1789}
1790
1791/*
1792 * The VFS layer already does all the dentry stuff for rename,
1793 * we just have to decrement the usage count for the target if
1794 * it exists so that the VFS layer correctly frees it when it
1795 * gets overwritten.
1796 */
1797static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1798{
1799	struct inode *inode = old_dentry->d_inode;
1800	int they_are_dirs = S_ISDIR(inode->i_mode);
1801
1802	if (!simple_empty(new_dentry))
1803		return -ENOTEMPTY;
1804
1805	if (new_dentry->d_inode) {
1806		(void) shmem_unlink(new_dir, new_dentry);
1807		if (they_are_dirs)
1808			drop_nlink(old_dir);
1809	} else if (they_are_dirs) {
1810		drop_nlink(old_dir);
1811		inc_nlink(new_dir);
1812	}
1813
1814	old_dir->i_size -= BOGO_DIRENT_SIZE;
1815	new_dir->i_size += BOGO_DIRENT_SIZE;
1816	old_dir->i_ctime = old_dir->i_mtime =
1817	new_dir->i_ctime = new_dir->i_mtime =
1818	inode->i_ctime = CURRENT_TIME;
1819	return 0;
1820}
1821
1822static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1823{
1824	int error;
1825	int len;
1826	struct inode *inode;
1827	struct page *page = NULL;
1828	char *kaddr;
1829	struct shmem_inode_info *info;
1830
1831	len = strlen(symname) + 1;
1832	if (len > PAGE_CACHE_SIZE)
1833		return -ENAMETOOLONG;
1834
1835	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1836	if (!inode)
1837		return -ENOSPC;
1838
1839	error = security_inode_init_security(inode, dir, NULL, NULL,
1840					     NULL);
1841	if (error) {
1842		if (error != -EOPNOTSUPP) {
1843			iput(inode);
1844			return error;
1845		}
1846		error = 0;
1847	}
1848
1849	info = SHMEM_I(inode);
1850	inode->i_size = len-1;
1851	if (len <= (char *)inode - (char *)info) {
1852		/* do it inline */
1853		memcpy(info, symname, len);
1854		inode->i_op = &shmem_symlink_inline_operations;
1855	} else {
1856		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1857		if (error) {
1858			iput(inode);
1859			return error;
1860		}
1861		inode->i_op = &shmem_symlink_inode_operations;
1862		kaddr = kmap_atomic(page, KM_USER0);
1863		memcpy(kaddr, symname, len);
1864		kunmap_atomic(kaddr, KM_USER0);
1865		set_page_dirty(page);
1866		page_cache_release(page);
1867	}
1868	if (dir->i_mode & S_ISGID)
1869		inode->i_gid = dir->i_gid;
1870	dir->i_size += BOGO_DIRENT_SIZE;
1871	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1872	d_instantiate(dentry, inode);
1873	dget(dentry);
1874	return 0;
1875}
1876
1877static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1878{
1879	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1880	return NULL;
1881}
1882
1883static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1884{
1885	struct page *page = NULL;
1886	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1887	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1888	return page;
1889}
1890
1891static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1892{
1893	if (!IS_ERR(nd_get_link(nd))) {
1894		struct page *page = cookie;
1895		kunmap(page);
1896		mark_page_accessed(page);
1897		page_cache_release(page);
1898	}
1899}
1900
1901static struct inode_operations shmem_symlink_inline_operations = {
1902	.readlink	= generic_readlink,
1903	.follow_link	= shmem_follow_link_inline,
1904};
1905
1906static struct inode_operations shmem_symlink_inode_operations = {
1907	.truncate	= shmem_truncate,
1908	.readlink	= generic_readlink,
1909	.follow_link	= shmem_follow_link,
1910	.put_link	= shmem_put_link,
1911};
1912
1913#ifdef CONFIG_TMPFS_POSIX_ACL
1914/*
1915 * Superblocks without xattr inode operations will get security.* xattr
1916 * support from the VFS "for free". As soon as we have any other xattrs
1917 * like ACLs, we also need to implement the security.* handlers at
1918 * filesystem level, though.
1919 */

static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_getsecurity(inode, name, buffer, size,
					  -EOPNOTSUPP);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

struct xattr_handler shmem_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.list   = shmem_xattr_security_list,
	.get    = shmem_xattr_security_get,
	.set    = shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif

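/*
 * tmpfs file handles carry no parent information, so a disconnected
 * dentry cannot be reconnected: just report it stale.
 */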
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

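/*
 * The file handle built by shmem_encode_fh() is three words: fh[0] is
 * i_generation, fh[1] the low 32 bits of i_ino, fh[2] the high bits.
 * The inode is hashed on i_ino + i_generation, which is also the hash
 * value passed to ilookup5() in shmem_get_dentry() below.
 */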
static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_get_dentry(struct super_block *sb, void *vfh)
{
	struct dentry *de = NULL;
	struct inode *inode;
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];

	inode = ilookup5(sb, (unsigned long)(inum+fh[0]), shmem_match, vfh);
	if (inode) {
		de = d_find_alias(inode);
		iput(inode);
	}

	return de ? de : ERR_PTR(-ESTALE);
}

static struct dentry *shmem_decode_fh(struct super_block *sb, __u32 *fh,
		int len, int type,
		int (*acceptable)(void *context, struct dentry *de),
		void *context)
{
	if (len < 3)
		return ERR_PTR(-ESTALE);

	return sb->s_export_op->find_exported_dentry(sb, fh, NULL, acceptable,
							context);
}

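/*
 * tmpfs inodes are normally left unhashed, since nothing ever looks them
 * up by inode number; but ilookup5() in shmem_get_dentry() can only find
 * hashed inodes, so hash the inode lazily the first time it is exported.
 */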
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3)
		return 255;

	if (hlist_unhashed(&inode->i_hash)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (hlist_unhashed(&inode->i_hash))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static struct export_operations shmem_export_ops = {
	.get_parent     = shmem_get_parent,
	.get_dentry     = shmem_get_dentry,
	.encode_fh      = shmem_encode_fh,
	.decode_fh      = shmem_decode_fh,
};

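/*
 * Parse tmpfs mount options: size (bytes, with k/m/g suffixes, or a
 * percentage of RAM with a trailing '%'), nr_blocks, nr_inodes, mode
 * (octal), uid, gid and mpol, e.g.
 *
 *	mount -t tmpfs -o size=512m,mode=1777 tmpfs /mnt
 *
 * Returns 0 on success, 1 on a bad option or bad value.
 */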
static int shmem_parse_options(char *options, int *mode, uid_t *uid,
	gid_t *gid, unsigned long *blocks, unsigned long *inodes,
	int *policy, nodemask_t *policy_nodes)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
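			/*
			 * e.g. in "mpol=bind:0,2,mode=755" the comma after
			 * "0" sits inside the nodelist, so a comma is only
			 * treated as a separator when the character after
			 * it is not a digit.
			 */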
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (shmem_parse_mpol(value,policy,policy_nodes))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}

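/*
 * Remounting can only adjust the limits and the mempolicy: shrinking a
 * limit below what is currently in use is refused, and so is going from
 * unlimited to limited, since no usage has been recorded, e.g.
 *
 *	mount -o remount,size=2g /dev/shm
 */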
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	int policy = sbinfo->policy;
	nodemask_t policy_nodes = sbinfo->policy_nodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
				&max_inodes, &policy, &policy_nodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
#endif

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode   = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;
	int policy = MPOL_DEFAULT;
	nodemask_t policy_nodes = node_online_map;

#ifdef CONFIG_TMPFS
	/*
	 * By default we allow each tmpfs instance only half of physical
	 * RAM, and limit inodes to one per page of lowmem; but the
	 * internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
					&inodes, &policy, &policy_nodes))
			return -EINVAL;
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/*
		 * Only struct inode is valid for an inline symlink:
		 * the rest of shmem_inode_info holds the link target,
		 * so only free the shared policy of a regular file.
		 */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	shmem_acl_destroy_inode(inode);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

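/*
 * The slab constructor runs when a fresh page of objects is added to the
 * cache, not on each kmem_cache_alloc(); the flag test below skips the
 * debug verify pass so the inode is only initialized on real construction.
 */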
static void init_once(void *foo, struct kmem_cache *cachep,
		      unsigned long flags)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
		p->i_acl = NULL;
		p->i_default_acl = NULL;
#endif
	}
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, 0, init_once, NULL);
	if (shmem_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

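/*
 * tmpfs pages have no backing file to write to: under memory pressure
 * shmem_writepage() moves a page out to swap instead.
 */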
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
#ifdef CONFIG_TMPFS
	.prepare_write	= shmem_prepare_write,
	.commit_write	= simple_commit_write,
#endif
	.migratepage	= migrate_page,
};

static struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.sendfile	= shmem_file_sendfile,
#endif
};

static struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.nopage		= shmem_nopage,
	.populate	= shmem_populate,
#ifdef CONFIG_NUMA
	.set_policy     = shmem_set_policy,
	.get_policy     = shmem_get_policy,
#endif
};

static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
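
/*
 * shm_mnt is the internal kernel mount of tmpfs, set up by init_tmpfs()
 * with MS_NOUSER so it is never size-limited; shmem_file_setup() creates
 * its unlinked files here.
 */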
static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)

/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: vm_flags of the caller; only VM_ACCOUNT is honoured, to decide
 *	whether the memory should be charged against the accounting limits
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	file->f_vfsmnt = mntget(shm_mnt);
	file->f_dentry = dentry;
	file->f_mapping = inode->i_mapping;
	file->f_op = &shmem_file_operations;
	file->f_mode = FMODE_WRITE | FMODE_READ;
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}

/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

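/*
 * Illustrative sketch only, not code from this file: a caller wanting
 * its own unlinked, accounted tmpfs file might do
 *
 *	struct file *filp = shmem_file_setup("dev/example", 1024 * 1024,
 *					     VM_ACCOUNT);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);
 *
 * where "dev/example" is a made-up name that would only ever show up in
 * /proc/<pid>/maps, and the final fput() frees the unlinked file.
 */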