shmem.c revision 46017e954826ac59e91df76341a3f76b45467847
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
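
/*
 * Illustrative sizing (assuming 4K pages, a 32-bit unsigned long, and
 * SHMEM_NR_DIRECT == 16): ENTRIES_PER_PAGE is 4096/4 = 1024, so
 * ENTRIES_PER_PAGEPAGE is 1024*1024; SHMEM_MAX_INDEX then works out to
 * 16 + 524288*1025, about 5.4e8 pages, giving SHMEM_MAX_BYTES of about
 * 2TB.  BLOCKS_PER_PAGE is 8, since i_blocks counts 512-byte sectors.
 */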

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
	SGP_FAULT,	/* same as SGP_CACHE, return with page locked */
};
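
/*
 * For orientation (see the callers later in this file): shmem_fault
 * passes SGP_FAULT, shmem_readpage SGP_CACHE, shmem_write_begin and
 * shmem_file_write SGP_WRITE, and the read and readlink paths SGP_READ.
 */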

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
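
/*
 * Illustrative (hypothetical) numbers for the above: if info->alloced
 * is 10 while info->swapped is 2 and nrpages is 5, then 3 pages were
 * reclaimed behind our back, so 3 blocks are credited back to both the
 * memory accounting and the superblock's free_blocks.
 */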

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	       +-> 28-31
 * 	      |	       +-> 32-35
 * 	      |	       +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 	       	       +-> 44-47
 * 	      	       +-> 48-51
 * 	      	       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
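
/*
 * Example walk through shmem_swp_entry (assuming ENTRIES_PER_PAGE ==
 * 1024 and SHMEM_NR_DIRECT == 16): index 5000 becomes 4984 past the
 * direct entries, giving offset 4984 % 1024 = 888 and slot 4984 / 1024
 * = 4; slot 4 lies in the doubly indirect first half, so the entry is
 * found at shmem_swp_map(*(dir + 4)) + 888.
 */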

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
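
/*
 * Note on the loop above: info->lock is dropped around shmem_dir_alloc,
 * which may sleep, so after retaking the lock we must recheck i_size
 * and retry the lookup; if another task installed a page (or truncated
 * the file) meanwhile, the page we allocated is given back at the end.
 */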

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
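
/*
 * shmem_truncate_range serves both complete truncation, which passes
 * end == (loff_t)-1 (see shmem_truncate below), and hole punching,
 * which passes a bounded range and therefore follows the punch_hole
 * locking rules described above.
 */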

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * we want to do nothing when that underlying filesystem is tmpfs
	 * (writing out to swap is useful as a response to memory pressure, but
	 * of no use to stabilize the data) - just redirty the page, unlock it
	 * and claim success in this case.  AOP_WRITEPAGE_ACTIVATE, and the
	 * page_mapped check below, must be avoided unless we're in reclaim.
	 */
	if (!wbc->for_reclaim) {
		set_page_dirty(page);
		unlock_page(page);
		return 0;
	}
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}

#ifdef CONFIG_NUMA
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	char *nodelist = strchr(value, ':');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate policy string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, *policy_nodes))
			goto out;
		if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
			goto out;
	}
	if (!strcmp(value, "default")) {
		*policy = MPOL_DEFAULT;
		/* Don't allow a nodelist */
		if (!nodelist)
			err = 0;
	} else if (!strcmp(value, "prefer")) {
		*policy = MPOL_PREFERRED;
		/* Insist on a nodelist of one node only */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
	} else if (!strcmp(value, "bind")) {
		*policy = MPOL_BIND;
		/* Insist on a nodelist */
		if (nodelist)
			err = 0;
	} else if (!strcmp(value, "interleave")) {
		*policy = MPOL_INTERLEAVE;
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			*policy_nodes = node_states[N_HIGH_MEMORY];
		err = 0;
	}
out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	return err;
}
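
/*
 * Illustrative mount lines for the forms accepted above:
 *	mount -t tmpfs -o mpol=default tmpfs /mnt
 *	mount -t tmpfs -o mpol=prefer:1 tmpfs /mnt
 *	mount -t tmpfs -o mpol=bind:0-3 tmpfs /mnt
 *	mount -t tmpfs -o mpol=interleave tmpfs /mnt
 */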

static struct page *shmem_swapin(struct shmem_inode_info *info,
				       swp_entry_t entry, unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	page = swapin_readahead(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
					unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	page = alloc_page_vma(gfp, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy,
						nodemask_t *policy_nodes)
{
	return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
	return swapin_readahead(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and the page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage and shmem_write_begin pass in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info,
						    idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		*pagep = filepage;
		if (sgp != SGP_FAULT)
			unlock_page(filepage);
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	mark_page_accessed(vmf->page);
	return ret | VM_FAULT_LOCKED;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}
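
/*
 * shmem_lock is presumed to be reached via shmctl(SHM_LOCK/SHM_UNLOCK)
 * on SysV shm segments; setting VM_LOCKED in info->flags is what makes
 * shmem_writepage above redirty the page instead of swapping it out.
 */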

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy, sbinfo->policy,
							&sbinfo->policy_nodes);
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
						NULL);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	set_page_dirty(page);
	page_cache_release(page);

	if (pos+copied > inode->i_size)
		i_size_write(inode, pos+copied);

	return copied;
}

static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode	*inode = file->f_path.dentry->d_inode;
	loff_t		pos;
	unsigned long	written;
	ssize_t		err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	mutex_lock(&inode->i_mutex);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}
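
/*
 * Note on the copy loop above: for a highmem page we first touch both
 * ends of the user buffer and attempt the copy under kmap_atomic; only
 * if that copy comes up short (or the page was not highmem at all) is
 * the whole chunk copied again under a sleeping kmap.
 */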

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}
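
/*
 * Holes never allocate pages on read: shmem_getpage(SGP_READ) leaves
 * the page pointer NULL over a hole, and the loop above substitutes
 * ZERO_PAGE(0), so sparse reads return zeros without consuming memory.
 */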

static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
1921
1922static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1923{
1924	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1925	return NULL;
1926}
1927
1928static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1929{
1930	struct page *page = NULL;
1931	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1932	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1933	return page;
1934}
1935
1936static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1937{
1938	if (!IS_ERR(nd_get_link(nd))) {
1939		struct page *page = cookie;
1940		kunmap(page);
1941		mark_page_accessed(page);
1942		page_cache_release(page);
1943	}
1944}
1945
1946static const struct inode_operations shmem_symlink_inline_operations = {
1947	.readlink	= generic_readlink,
1948	.follow_link	= shmem_follow_link_inline,
1949};
1950
1951static const struct inode_operations shmem_symlink_inode_operations = {
1952	.truncate	= shmem_truncate,
1953	.readlink	= generic_readlink,
1954	.follow_link	= shmem_follow_link,
1955	.put_link	= shmem_put_link,
1956};
1957
1958#ifdef CONFIG_TMPFS_POSIX_ACL
1959/*
1960 * Superblocks without xattr inode operations will get security.* xattr
1961 * support from the VFS "for free". As soon as we have any other xattrs
1962 * like ACLs, we also need to implement the security.* handlers at
1963 * filesystem level, though.
1964 */
1965
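/*
 * The generic xattr code strips the "security." prefix before calling
 * the handlers below, so e.g. getfattr -n security.selinux <file>
 * arrives at shmem_xattr_security_get() with name == "selinux"; a
 * bare "security." leaves name == "" and is rejected with -EINVAL.
 */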
1966static size_t shmem_xattr_security_list(struct inode *inode, char *list,
1967					size_t list_len, const char *name,
1968					size_t name_len)
1969{
1970	return security_inode_listsecurity(inode, list, list_len);
1971}
1972
1973static int shmem_xattr_security_get(struct inode *inode, const char *name,
1974				    void *buffer, size_t size)
1975{
1976	if (strcmp(name, "") == 0)
1977		return -EINVAL;
1978	return security_inode_getsecurity(inode, name, buffer, size,
1979					  -EOPNOTSUPP);
1980}
1981
1982static int shmem_xattr_security_set(struct inode *inode, const char *name,
1983				    const void *value, size_t size, int flags)
1984{
1985	if (strcmp(name, "") == 0)
1986		return -EINVAL;
1987	return security_inode_setsecurity(inode, name, value, size, flags);
1988}
1989
1990static struct xattr_handler shmem_xattr_security_handler = {
1991	.prefix = XATTR_SECURITY_PREFIX,
1992	.list   = shmem_xattr_security_list,
1993	.get    = shmem_xattr_security_get,
1994	.set    = shmem_xattr_security_set,
1995};
1996
1997static struct xattr_handler *shmem_xattr_handlers[] = {
1998	&shmem_xattr_acl_access_handler,
1999	&shmem_xattr_acl_default_handler,
2000	&shmem_xattr_security_handler,
2001	NULL
2002};
2003#endif
2004
2005static struct dentry *shmem_get_parent(struct dentry *child)
2006{
2007	return ERR_PTR(-ESTALE);
2008}
2009
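/*
 * File handle layout produced by shmem_encode_fh() below:
 *	fh[0] = i_generation
 *	fh[1] = i_ino, low 32 bits
 *	fh[2] = i_ino, high 32 bits
 * e.g. ino 0x1234 with generation 7 encodes as { 7, 0x1234, 0 }.
 */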
2010static int shmem_match(struct inode *ino, void *vfh)
2011{
2012	__u32 *fh = vfh;
2013	__u64 inum = fh[2];
2014	inum = (inum << 32) | fh[1];
2015	return ino->i_ino == inum && fh[0] == ino->i_generation;
2016}
2017
2018static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2019		struct fid *fid, int fh_len, int fh_type)
2020{
2021	struct inode *inode;
2022	struct dentry *dentry = NULL;
	u64 inum;

	/* validate the handle length before reading raw[1] and raw[2] */
	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];
2028
2029	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2030			shmem_match, fid->raw);
2031	if (inode) {
2032		dentry = d_find_alias(inode);
2033		iput(inode);
2034	}
2035
2036	return dentry;
2037}
2038
2039static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2040				int connectable)
2041{
2042	struct inode *inode = dentry->d_inode;
2043
2044	if (*len < 3)
2045		return 255;	/* 255 tells exportfs the handle buffer is too small */
2046
2047	if (hlist_unhashed(&inode->i_hash)) {
2048		/* Unfortunately insert_inode_hash is not idempotent,
2049		 * so as we hash inodes here rather than at creation
2050		 * time, we need a lock to ensure we only try
2051		 * to do it once
2052		 */
2053		static DEFINE_SPINLOCK(lock);
2054		spin_lock(&lock);
2055		if (hlist_unhashed(&inode->i_hash))
2056			__insert_inode_hash(inode,
2057					    inode->i_ino + inode->i_generation);
2058		spin_unlock(&lock);
2059	}
2060
2061	fh[0] = inode->i_generation;
2062	fh[1] = inode->i_ino;
2063	fh[2] = ((__u64)inode->i_ino) >> 32;
2064
2065	*len = 3;
2066	return 1;
2067}
2068
2069static const struct export_operations shmem_export_ops = {
2070	.get_parent     = shmem_get_parent,
2071	.encode_fh      = shmem_encode_fh,
2072	.fh_to_dentry	= shmem_fh_to_dentry,
2073};
2074
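/*
 * Example mount data accepted below (illustrative):
 *	"size=50%,nr_inodes=100000,mode=1777,uid=0,gid=0"
 * "size" takes k/m/g suffixes via memparse, or a percentage of
 * totalram_pages; an mpol nodelist (e.g. "mpol=bind:0,1", assuming
 * CONFIG_NUMA) may itself contain commas, so digits right after a
 * comma are treated as a continuation of the previous option.
 */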
2075static int shmem_parse_options(char *options, int *mode, uid_t *uid,
2076	gid_t *gid, unsigned long *blocks, unsigned long *inodes,
2077	int *policy, nodemask_t *policy_nodes)
2078{
2079	char *this_char, *value, *rest;
2080
2081	while (options != NULL) {
2082		this_char = options;
2083		for (;;) {
2084			/*
2085			 * NUL-terminate this option: unfortunately,
2086			 * mount options form a comma-separated list,
2087			 * but mpol's nodelist may also contain commas.
2088			 */
2089			options = strchr(options, ',');
2090			if (options == NULL)
2091				break;
2092			options++;
2093			if (!isdigit(*options)) {
2094				options[-1] = '\0';
2095				break;
2096			}
2097		}
2098		if (!*this_char)
2099			continue;
2100		if ((value = strchr(this_char,'=')) != NULL) {
2101			*value++ = 0;
2102		} else {
2103			printk(KERN_ERR
2104			    "tmpfs: No value for mount option '%s'\n",
2105			    this_char);
2106			return 1;
2107		}
2108
2109		if (!strcmp(this_char,"size")) {
2110			unsigned long long size;
2111			size = memparse(value,&rest);
2112			if (*rest == '%') {
2113				size <<= PAGE_SHIFT;
2114				size *= totalram_pages;
2115				do_div(size, 100);
2116				rest++;
2117			}
2118			if (*rest)
2119				goto bad_val;
2120			*blocks = size >> PAGE_CACHE_SHIFT;
2121		} else if (!strcmp(this_char,"nr_blocks")) {
2122			*blocks = memparse(value,&rest);
2123			if (*rest)
2124				goto bad_val;
2125		} else if (!strcmp(this_char,"nr_inodes")) {
2126			*inodes = memparse(value,&rest);
2127			if (*rest)
2128				goto bad_val;
2129		} else if (!strcmp(this_char,"mode")) {
2130			if (!mode)
2131				continue;
2132			*mode = simple_strtoul(value,&rest,8);
2133			if (*rest)
2134				goto bad_val;
2135		} else if (!strcmp(this_char,"uid")) {
2136			if (!uid)
2137				continue;
2138			*uid = simple_strtoul(value,&rest,0);
2139			if (*rest)
2140				goto bad_val;
2141		} else if (!strcmp(this_char,"gid")) {
2142			if (!gid)
2143				continue;
2144			*gid = simple_strtoul(value,&rest,0);
2145			if (*rest)
2146				goto bad_val;
2147		} else if (!strcmp(this_char,"mpol")) {
2148			if (shmem_parse_mpol(value,policy,policy_nodes))
2149				goto bad_val;
2150		} else {
2151			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2152			       this_char);
2153			return 1;
2154		}
2155	}
2156	return 0;
2157
2158bad_val:
2159	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2160	       value, this_char);
2161	return 1;
2163}
2164
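/*
 * Current usage is a floor for the new limits: e.g. with 1000 inodes
 * in use, "mount -o remount,nr_inodes=500 <mnt>" fails with -EINVAL;
 * and going from unlimited (0) to any finite limit is refused, since
 * usage was never tracked while unlimited.
 */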
2165static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2166{
2167	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2168	unsigned long max_blocks = sbinfo->max_blocks;
2169	unsigned long max_inodes = sbinfo->max_inodes;
2170	int policy = sbinfo->policy;
2171	nodemask_t policy_nodes = sbinfo->policy_nodes;
2172	unsigned long blocks;
2173	unsigned long inodes;
2174	int error = -EINVAL;
2175
2176	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
2177				&max_inodes, &policy, &policy_nodes))
2178		return error;
2179
2180	spin_lock(&sbinfo->stat_lock);
2181	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
2182	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2183	if (max_blocks < blocks)
2184		goto out;
2185	if (max_inodes < inodes)
2186		goto out;
2187	/*
2188	 * Those tests also disallow limited->unlimited while any are in
2189	 * use, so i_blocks will always be zero when max_blocks is zero;
2190	 * but we must separately disallow unlimited->limited, because
2191	 * in that case we have no record of how much is already in use.
2192	 */
2193	if (max_blocks && !sbinfo->max_blocks)
2194		goto out;
2195	if (max_inodes && !sbinfo->max_inodes)
2196		goto out;
2197
2198	error = 0;
2199	sbinfo->max_blocks  = max_blocks;
2200	sbinfo->free_blocks = max_blocks - blocks;
2201	sbinfo->max_inodes  = max_inodes;
2202	sbinfo->free_inodes = max_inodes - inodes;
2203	sbinfo->policy = policy;
2204	sbinfo->policy_nodes = policy_nodes;
2205out:
2206	spin_unlock(&sbinfo->stat_lock);
2207	return error;
2208}
2209#endif
2210
2211static void shmem_put_super(struct super_block *sb)
2212{
2213	kfree(sb->s_fs_info);
2214	sb->s_fs_info = NULL;
2215}
2216
2217static int shmem_fill_super(struct super_block *sb,
2218			    void *data, int silent)
2219{
2220	struct inode *inode;
2221	struct dentry *root;
2222	int mode   = S_IRWXUGO | S_ISVTX;
2223	uid_t uid = current->fsuid;
2224	gid_t gid = current->fsgid;
2225	int err = -ENOMEM;
2226	struct shmem_sb_info *sbinfo;
2227	unsigned long blocks = 0;
2228	unsigned long inodes = 0;
2229	int policy = MPOL_DEFAULT;
2230	nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];
2231
2232#ifdef CONFIG_TMPFS
2233	/*
2234	 * By default we allow only half of the physical RAM per
2235	 * tmpfs instance, limiting inodes to one per page of lowmem;
2236	 * but the internal instance is left unlimited.
2237	 */
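	/*
	 * Worked example (illustrative): 1GB of RAM with 4kB pages and
	 * no highmem gives totalram_pages = 262144, so blocks = 131072
	 * (512MB) and inodes = 262144, then clamped down to 131072.
	 */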
2238	if (!(sb->s_flags & MS_NOUSER)) {
2239		blocks = totalram_pages / 2;
2240		inodes = totalram_pages - totalhigh_pages;
2241		if (inodes > blocks)
2242			inodes = blocks;
2243		if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
2244					&inodes, &policy, &policy_nodes))
2245			return -EINVAL;
2246	}
2247	sb->s_export_op = &shmem_export_ops;
2248#else
2249	sb->s_flags |= MS_NOUSER;
2250#endif
2251
2252	/* Round up to L1_CACHE_BYTES to resist false sharing */
2253	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
2254				L1_CACHE_BYTES), GFP_KERNEL);
2255	if (!sbinfo)
2256		return -ENOMEM;
2257
2258	spin_lock_init(&sbinfo->stat_lock);
2259	sbinfo->max_blocks = blocks;
2260	sbinfo->free_blocks = blocks;
2261	sbinfo->max_inodes = inodes;
2262	sbinfo->free_inodes = inodes;
2263	sbinfo->policy = policy;
2264	sbinfo->policy_nodes = policy_nodes;
2265
2266	sb->s_fs_info = sbinfo;
2267	sb->s_maxbytes = SHMEM_MAX_BYTES;
2268	sb->s_blocksize = PAGE_CACHE_SIZE;
2269	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2270	sb->s_magic = TMPFS_MAGIC;
2271	sb->s_op = &shmem_ops;
2272	sb->s_time_gran = 1;
2273#ifdef CONFIG_TMPFS_POSIX_ACL
2274	sb->s_xattr = shmem_xattr_handlers;
2275	sb->s_flags |= MS_POSIXACL;
2276#endif
2277
2278	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2279	if (!inode)
2280		goto failed;
2281	inode->i_uid = uid;
2282	inode->i_gid = gid;
2283	root = d_alloc_root(inode);
2284	if (!root)
2285		goto failed_iput;
2286	sb->s_root = root;
2287	return 0;
2288
2289failed_iput:
2290	iput(inode);
2291failed:
2292	shmem_put_super(sb);
2293	return err;
2294}
2295
2296static struct kmem_cache *shmem_inode_cachep;
2297
2298static struct inode *shmem_alloc_inode(struct super_block *sb)
2299{
2300	struct shmem_inode_info *p;
2301	p = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2302	if (!p)
2303		return NULL;
2304	return &p->vfs_inode;
2305}
2306
2307static void shmem_destroy_inode(struct inode *inode)
2308{
2309	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/*
		 * Only for regular files: an inline symlink reuses the
		 * shmem_inode_info fields for its target, so only the
		 * embedded struct inode is valid there.
		 */
2311		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2312	}
2313	shmem_acl_destroy_inode(inode);
2314	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2315}
2316
2317static void init_once(struct kmem_cache *cachep, void *foo)
2318{
2319	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2320
2321	inode_init_once(&p->vfs_inode);
2322#ifdef CONFIG_TMPFS_POSIX_ACL
2323	p->i_acl = NULL;
2324	p->i_default_acl = NULL;
2325#endif
2326}
2327
2328static int init_inodecache(void)
2329{
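	/* SLAB_PANIC: a failed allocation panics, so no error path is needed */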
2330	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2331				sizeof(struct shmem_inode_info),
2332				0, SLAB_PANIC, init_once);
2333	return 0;
2334}
2335
2336static void destroy_inodecache(void)
2337{
2338	kmem_cache_destroy(shmem_inode_cachep);
2339}
2340
2341static const struct address_space_operations shmem_aops = {
2342	.writepage	= shmem_writepage,
2343	.set_page_dirty	= __set_page_dirty_no_writeback,
2344#ifdef CONFIG_TMPFS
2345	.readpage	= shmem_readpage,
2346	.write_begin	= shmem_write_begin,
2347	.write_end	= shmem_write_end,
2348#endif
2349	.migratepage	= migrate_page,
2350};
2351
2352static const struct file_operations shmem_file_operations = {
2353	.mmap		= shmem_mmap,
2354#ifdef CONFIG_TMPFS
2355	.llseek		= generic_file_llseek,
2356	.read		= shmem_file_read,
2357	.write		= shmem_file_write,
2358	.fsync		= simple_sync_file,
2359	.splice_read	= generic_file_splice_read,
2360	.splice_write	= generic_file_splice_write,
2361#endif
2362};
2363
2364static const struct inode_operations shmem_inode_operations = {
2365	.truncate	= shmem_truncate,
2366	.setattr	= shmem_notify_change,
2367	.truncate_range	= shmem_truncate_range,
2368#ifdef CONFIG_TMPFS_POSIX_ACL
2369	.setxattr	= generic_setxattr,
2370	.getxattr	= generic_getxattr,
2371	.listxattr	= generic_listxattr,
2372	.removexattr	= generic_removexattr,
2373	.permission	= shmem_permission,
2374#endif
2375
2376};
2377
2378static const struct inode_operations shmem_dir_inode_operations = {
2379#ifdef CONFIG_TMPFS
2380	.create		= shmem_create,
2381	.lookup		= simple_lookup,
2382	.link		= shmem_link,
2383	.unlink		= shmem_unlink,
2384	.symlink	= shmem_symlink,
2385	.mkdir		= shmem_mkdir,
2386	.rmdir		= shmem_rmdir,
2387	.mknod		= shmem_mknod,
2388	.rename		= shmem_rename,
2389#endif
2390#ifdef CONFIG_TMPFS_POSIX_ACL
2391	.setattr	= shmem_notify_change,
2392	.setxattr	= generic_setxattr,
2393	.getxattr	= generic_getxattr,
2394	.listxattr	= generic_listxattr,
2395	.removexattr	= generic_removexattr,
2396	.permission	= shmem_permission,
2397#endif
2398};
2399
2400static const struct inode_operations shmem_special_inode_operations = {
2401#ifdef CONFIG_TMPFS_POSIX_ACL
2402	.setattr	= shmem_notify_change,
2403	.setxattr	= generic_setxattr,
2404	.getxattr	= generic_getxattr,
2405	.listxattr	= generic_listxattr,
2406	.removexattr	= generic_removexattr,
2407	.permission	= shmem_permission,
2408#endif
2409};
2410
2411static const struct super_operations shmem_ops = {
2412	.alloc_inode	= shmem_alloc_inode,
2413	.destroy_inode	= shmem_destroy_inode,
2414#ifdef CONFIG_TMPFS
2415	.statfs		= shmem_statfs,
2416	.remount_fs	= shmem_remount_fs,
2417#endif
2418	.delete_inode	= shmem_delete_inode,
2419	.drop_inode	= generic_delete_inode,
2420	.put_super	= shmem_put_super,
2421};
2422
2423static struct vm_operations_struct shmem_vm_ops = {
2424	.fault		= shmem_fault,
2425#ifdef CONFIG_NUMA
2426	.set_policy     = shmem_set_policy,
2427	.get_policy     = shmem_get_policy,
2428#endif
2429};
2430
2432static int shmem_get_sb(struct file_system_type *fs_type,
2433	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2434{
2435	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
2436}
2437
2438static struct file_system_type tmpfs_fs_type = {
2439	.owner		= THIS_MODULE,
2440	.name		= "tmpfs",
2441	.get_sb		= shmem_get_sb,
2442	.kill_sb	= kill_litter_super,
2443};
2444static struct vfsmount *shm_mnt;
2445
2446static int __init init_tmpfs(void)
2447{
2448	int error;
2449
2450	error = bdi_init(&shmem_backing_dev_info);
2451	if (error)
2452		goto out4;
2453
2454	error = init_inodecache();
2455	if (error)
2456		goto out3;
2457
2458	error = register_filesystem(&tmpfs_fs_type);
2459	if (error) {
2460		printk(KERN_ERR "Could not register tmpfs\n");
2461		goto out2;
2462	}
2463
2464	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
2465				tmpfs_fs_type.name, NULL);
2466	if (IS_ERR(shm_mnt)) {
2467		error = PTR_ERR(shm_mnt);
2468		printk(KERN_ERR "Could not kern_mount tmpfs\n");
2469		goto out1;
2470	}
2471	return 0;
2472
2473out1:
2474	unregister_filesystem(&tmpfs_fs_type);
2475out2:
2476	destroy_inodecache();
2477out3:
2478	bdi_destroy(&shmem_backing_dev_info);
2479out4:
2480	shm_mnt = ERR_PTR(error);
2481	return error;
2482}
2483module_init(init_tmpfs)
2484
2485/*
2486 * shmem_file_setup - get an unlinked file living in tmpfs
2487 *
2488 * @name: name for dentry (to be seen in /proc/<pid>/maps)
2489 * @size: size to be set for the file
 * @flags: vm_flags; only VM_ACCOUNT is acted on here
2490 *
2491 */
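/*
 * Typical use (illustrative sketch): callers such as shmem_zero_setup()
 * below create an unlinked file and attach it to a mapping:
 *
 *	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	vma->vm_file = file;
 */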
2492struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2493{
2494	int error;
2495	struct file *file;
2496	struct inode *inode;
2497	struct dentry *dentry, *root;
2498	struct qstr this;
2499
2500	if (IS_ERR(shm_mnt))
2501		return (void *)shm_mnt;
2502
2503	if (size < 0 || size > SHMEM_MAX_BYTES)
2504		return ERR_PTR(-EINVAL);
2505
2506	if (shmem_acct_size(flags, size))
2507		return ERR_PTR(-ENOMEM);
2508
2509	error = -ENOMEM;
2510	this.name = name;
2511	this.len = strlen(name);
2512	this.hash = 0; /* unused: this dentry is never hashed for lookup */
2513	root = shm_mnt->mnt_root;
2514	dentry = d_alloc(root, &this);
2515	if (!dentry)
2516		goto put_memory;
2517
2518	error = -ENFILE;
2519	file = get_empty_filp();
2520	if (!file)
2521		goto put_dentry;
2522
2523	error = -ENOSPC;
2524	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2525	if (!inode)
2526		goto close_file;
2527
2528	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2529	d_instantiate(dentry, inode);
2530	inode->i_size = size;
2531	inode->i_nlink = 0;	/* It is unlinked */
2532	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
2533			&shmem_file_operations);
2534	return file;
2535
2536close_file:
2537	put_filp(file);
2538put_dentry:
2539	dput(dentry);
2540put_memory:
2541	shmem_unacct_size(flags, size);
2542	return ERR_PTR(error);
2543}
2544
2545/*
2546 * shmem_zero_setup - setup a shared anonymous mapping
2547 *
2548 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
2549 */
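/*
 * Reached for shared anonymous mappings: the memory is backed by an
 * unlinked tmpfs file, so it is swappable and appears under the
 * "dev/zero" name in /proc/<pid>/maps.
 */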
2550int shmem_zero_setup(struct vm_area_struct *vma)
2551{
2552	struct file *file;
2553	loff_t size = vma->vm_end - vma->vm_start;
2554
2555	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2556	if (IS_ERR(file))
2557		return PTR_ERR(file);
2558
2559	if (vma->vm_file)
2560		fput(vma->vm_file);
2561	vma->vm_file = file;
2562	vma->vm_ops = &shmem_vm_ops;
2563	return 0;
2564}
2565