shmem.c revision 1b1dcc1b57a49136f118a0f16367256ff9994a69
1/*
2 * Resizable virtual memory filesystem for Linux.
3 *
4 * Copyright (C) 2000 Linus Torvalds.
5 *		 2000 Transmeta Corp.
6 *		 2000-2001 Christoph Rohland
7 *		 2000-2001 SAP AG
8 *		 2002 Red Hat Inc.
9 * Copyright (C) 2002-2005 Hugh Dickins.
10 * Copyright (C) 2002-2005 VERITAS Software Corporation.
11 * Copyright (C) 2004 Andi Kleen, SuSE Labs
12 *
13 * Extended attribute support for tmpfs:
14 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
15 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
16 *
17 * This file is released under the GPL.
18 */
19
20/*
21 * This virtual memory filesystem is heavily based on the ramfs. It
22 * extends ramfs by the ability to use swap and honor resource limits,
23 * which makes it a completely usable filesystem.
24 */
25
26#include <linux/config.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/devfs_fs_kernel.h>
30#include <linux/fs.h>
31#include <linux/mm.h>
32#include <linux/mman.h>
33#include <linux/file.h>
34#include <linux/swap.h>
35#include <linux/pagemap.h>
36#include <linux/string.h>
37#include <linux/slab.h>
38#include <linux/backing-dev.h>
39#include <linux/shmem_fs.h>
40#include <linux/mount.h>
41#include <linux/writeback.h>
42#include <linux/vfs.h>
43#include <linux/blkdev.h>
44#include <linux/security.h>
45#include <linux/swapops.h>
46#include <linux/mempolicy.h>
47#include <linux/namei.h>
48#include <asm/uaccess.h>
49#include <asm/div64.h>
50#include <asm/pgtable.h>
51
52/* This magic number is used in glibc for posix shared memory */
53#define TMPFS_MAGIC	0x01021994
54
55#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
56#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
57#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
58
59#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
60#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
61
62#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
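/*
 * Worked example for the limits above (an illustrative sketch: assumes a
 * 32-bit build with 4K PAGE_CACHE_SIZE, sizeof(unsigned long) == 4, and
 * SHMEM_NR_DIRECT == 16, as the layout comment further down also assumes):
 *
 *   ENTRIES_PER_PAGE     = 4096/4 = 1024
 *   ENTRIES_PER_PAGEPAGE = 1024 * 1024
 *   SHMEM_MAX_INDEX      = 16 + (1048576/2) * 1025 = 537395216 pages
 *   SHMEM_MAX_BYTES      = 537395216 << 12, roughly 2 TiB per file
 *   VM_ACCT(5000)        = PAGE_CACHE_ALIGN(5000) >> PAGE_SHIFT = 2 pages
 */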
63
64/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
65#define SHMEM_PAGEIN	 VM_READ
66#define SHMEM_TRUNCATE	 VM_WRITE
67
68/* Definition to limit shmem_truncate's steps between cond_rescheds */
69#define LATENCY_LIMIT	 64
70
71/* Pretend that each entry is of this size in directory's i_size */
72#define BOGO_DIRENT_SIZE 20
73
74/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
75enum sgp_type {
76	SGP_QUICK,	/* don't try more than file page cache lookup */
77	SGP_READ,	/* don't exceed i_size, don't allocate page */
78	SGP_CACHE,	/* don't exceed i_size, may allocate page */
79	SGP_WRITE,	/* may exceed i_size, may allocate page */
80};
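/*
 * How these types are used below: shmem_nopage and shmem_populate ask for
 * SGP_CACHE (SGP_QUICK when populating nonblock), shmem_notify_change and
 * the read and follow_link paths use SGP_READ, and the write, prepare_write
 * and symlink paths use SGP_WRITE, the only type allowed past i_size.
 */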
81
82static int shmem_getpage(struct inode *inode, unsigned long idx,
83			 struct page **pagep, enum sgp_type sgp, int *type);
84
85static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
86{
87	/*
88	 * The above definition of ENTRIES_PER_PAGE, and the use of
89	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
90	 * might be reconsidered if it ever diverges from PAGE_SIZE.
91	 */
92	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
93}
94
95static inline void shmem_dir_free(struct page *page)
96{
97	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
98}
99
100static struct page **shmem_dir_map(struct page *page)
101{
102	return (struct page **)kmap_atomic(page, KM_USER0);
103}
104
105static inline void shmem_dir_unmap(struct page **dir)
106{
107	kunmap_atomic(dir, KM_USER0);
108}
109
110static swp_entry_t *shmem_swp_map(struct page *page)
111{
112	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
113}
114
115static inline void shmem_swp_balance_unmap(void)
116{
117	/*
118	 * When passing a pointer to an i_direct entry, to code which
119	 * also handles indirect entries and so will shmem_swp_unmap,
120	 * we must arrange for the preempt count to remain in balance.
121	 * What kmap_atomic of a lowmem page does depends on config
122	 * and architecture, so pretend to kmap_atomic some lowmem page.
123	 */
124	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
125}
126
127static inline void shmem_swp_unmap(swp_entry_t *entry)
128{
129	kunmap_atomic(entry, KM_USER1);
130}
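/*
 * The two kmap slots are deliberate: directory pages map with KM_USER0 and
 * swap-vector pages with KM_USER1, so shmem_map_and_free_swp can map a
 * swap-vector page while its caller still holds the directory page mapped,
 * without the atomic kmaps colliding.
 */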
131
132static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
133{
134	return sb->s_fs_info;
135}
136
137/*
138 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
139 * for shared memory and for shared anonymous (/dev/zero) mappings
140 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
141 * consistent with the pre-accounting of private mappings ...
142 */
143static inline int shmem_acct_size(unsigned long flags, loff_t size)
144{
145	return (flags & VM_ACCOUNT)?
146		security_vm_enough_memory(VM_ACCT(size)): 0;
147}
148
149static inline void shmem_unacct_size(unsigned long flags, loff_t size)
150{
151	if (flags & VM_ACCOUNT)
152		vm_unacct_memory(VM_ACCT(size));
153}
154
155/*
156 * ... whereas tmpfs objects are accounted incrementally as
157 * pages are allocated, in order to allow huge sparse files.
158 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
159 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
160 */
161static inline int shmem_acct_block(unsigned long flags)
162{
163	return (flags & VM_ACCOUNT)?
164		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
165}
166
167static inline void shmem_unacct_blocks(unsigned long flags, long pages)
168{
169	if (!(flags & VM_ACCOUNT))
170		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
171}
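/*
 * Illustration of the two regimes (sizes assumed, not from this file):
 * a 1MB shared anonymous mapping with VM_ACCOUNT pre-accounts all 256
 * pages in shmem_acct_size and shmem_acct_block then charges nothing;
 * a 1MB sparse tmpfs file is the opposite, paying shmem_acct_block one
 * page at a time, so its holes cost nothing until instantiated.
 */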
172
173static struct super_operations shmem_ops;
174static struct address_space_operations shmem_aops;
175static struct file_operations shmem_file_operations;
176static struct inode_operations shmem_inode_operations;
177static struct inode_operations shmem_dir_inode_operations;
178static struct vm_operations_struct shmem_vm_ops;
179
180static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
181	.ra_pages	= 0,	/* No readahead */
182	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
183	.unplug_io_fn	= default_unplug_io_fn,
184};
185
186static LIST_HEAD(shmem_swaplist);
187static DEFINE_SPINLOCK(shmem_swaplist_lock);
188
189static void shmem_free_blocks(struct inode *inode, long pages)
190{
191	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
192	if (sbinfo->max_blocks) {
193		spin_lock(&sbinfo->stat_lock);
194		sbinfo->free_blocks += pages;
195		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
196		spin_unlock(&sbinfo->stat_lock);
197	}
198}
199
200/*
201 * shmem_recalc_inode - recalculate the size of an inode
202 *
203 * @inode: inode to recalc
204 *
205 * We have to calculate the free blocks since the mm can drop
206 * undirtied hole pages behind our back.
207 *
208 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped,
209 * so what the mm has freed is info->alloced - (inode->i_mapping->nrpages + info->swapped).
210 *
211 * It has to be called with the spinlock held.
212 */
213static void shmem_recalc_inode(struct inode *inode)
214{
215	struct shmem_inode_info *info = SHMEM_I(inode);
216	long freed;
217
218	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
219	if (freed > 0) {
220		info->alloced -= freed;
221		shmem_unacct_blocks(info->flags, freed);
222		shmem_free_blocks(inode, freed);
223	}
224}
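/*
 * Example of the recalculation: with info->alloced == 8, nrpages == 3 and
 * info->swapped == 2, freed == 3: three undirtied hole pages were reclaimed
 * behind our back, so 3 pages are returned to both the vm accounting and
 * the superblock's free_blocks.
 */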
225
226/*
227 * shmem_swp_entry - find the swap vector position in the info structure
228 *
229 * @info:  info structure for the inode
230 * @index: index of the page to find
231 * @page:  optional page to add to the structure. Has to be preset to
232 *         all zeros
233 *
234 * If there is no space allocated yet it will return NULL when
235 * page is NULL, else it will use the page for the needed block,
236 * setting it to NULL on return to indicate that it has been used.
237 *
238 * The swap vector is organized the following way:
239 *
240 * There are SHMEM_NR_DIRECT entries directly stored in the
241 * shmem_inode_info structure. So small files do not need an additional
242 * allocation.
243 *
244 * For pages with index > SHMEM_NR_DIRECT there is the pointer
245 * i_indirect which points to a page which holds in the first half
246 * doubly indirect blocks, in the second half triple indirect blocks:
247 *
248 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
249 * following layout (for SHMEM_NR_DIRECT == 16):
250 *
251 * i_indirect -> dir --> 16-19
252 * 	      |	     +-> 20-23
253 * 	      |
254 * 	      +-->dir2 --> 24-27
255 * 	      |	       +-> 28-31
256 * 	      |	       +-> 32-35
257 * 	      |	       +-> 36-39
258 * 	      |
259 * 	      +-->dir3 --> 40-43
260 * 	       	       +-> 44-47
261 * 	      	       +-> 48-51
262 * 	      	       +-> 52-55
263 */
264static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
265{
266	unsigned long offset;
267	struct page **dir;
268	struct page *subdir;
269
270	if (index < SHMEM_NR_DIRECT) {
271		shmem_swp_balance_unmap();
272		return info->i_direct+index;
273	}
274	if (!info->i_indirect) {
275		if (page) {
276			info->i_indirect = *page;
277			*page = NULL;
278		}
279		return NULL;			/* need another page */
280	}
281
282	index -= SHMEM_NR_DIRECT;
283	offset = index % ENTRIES_PER_PAGE;
284	index /= ENTRIES_PER_PAGE;
285	dir = shmem_dir_map(info->i_indirect);
286
287	if (index >= ENTRIES_PER_PAGE/2) {
288		index -= ENTRIES_PER_PAGE/2;
289		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
290		index %= ENTRIES_PER_PAGE;
291		subdir = *dir;
292		if (!subdir) {
293			if (page) {
294				*dir = *page;
295				*page = NULL;
296			}
297			shmem_dir_unmap(dir);
298			return NULL;		/* need another page */
299		}
300		shmem_dir_unmap(dir);
301		dir = shmem_dir_map(subdir);
302	}
303
304	dir += index;
305	subdir = *dir;
306	if (!subdir) {
307		if (!page || !(subdir = *page)) {
308			shmem_dir_unmap(dir);
309			return NULL;		/* need a page */
310		}
311		*dir = subdir;
312		*page = NULL;
313	}
314	shmem_dir_unmap(dir);
315	return shmem_swp_map(subdir) + offset;
316}
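/*
 * Worked lookup in the artificial layout drawn above (ENTRIES_PER_PAGE == 4,
 * SHMEM_NR_DIRECT == 16), for index == 30:
 *
 *   index -= 16                      -> 14
 *   offset = 14 % 4                  -> 2
 *   index  = 14 / 4                  -> 3, >= ENTRIES_PER_PAGE/2, so:
 *   index -= 2                       -> 1
 *   dir   += 2 + 1/4                 -> topdir slot 2, i.e. dir2
 *   index %= 4                       -> 1, so dir2 slot 1, the 28-31 page
 *
 * and shmem_swp_map(that page) + 2 addresses entry 30, as in the diagram.
 */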
317
318static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
319{
320	long incdec = value? 1: -1;
321
322	entry->val = value;
323	info->swapped += incdec;
324	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
325		struct page *page = kmap_atomic_to_page(entry);
326		set_page_private(page, page_private(page) + incdec);
327	}
328}
329
330/*
331 * shmem_swp_alloc - get the position of the swap entry for the page.
332 *                   If it does not exist, allocate the entry.
333 *
334 * @info:	info structure for the inode
335 * @index:	index of the page to find
336 * @sgp:	check and recheck i_size? skip allocation?
337 */
338static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
339{
340	struct inode *inode = &info->vfs_inode;
341	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
342	struct page *page = NULL;
343	swp_entry_t *entry;
344
345	if (sgp != SGP_WRITE &&
346	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
347		return ERR_PTR(-EINVAL);
348
349	while (!(entry = shmem_swp_entry(info, index, &page))) {
350		if (sgp == SGP_READ)
351			return shmem_swp_map(ZERO_PAGE(0));
352		/*
353		 * Test free_blocks against 1 not 0, since we have 1 data
354		 * page (and perhaps indirect index pages) yet to allocate:
355		 * a waste to allocate index if we cannot allocate data.
356		 */
357		if (sbinfo->max_blocks) {
358			spin_lock(&sbinfo->stat_lock);
359			if (sbinfo->free_blocks <= 1) {
360				spin_unlock(&sbinfo->stat_lock);
361				return ERR_PTR(-ENOSPC);
362			}
363			sbinfo->free_blocks--;
364			inode->i_blocks += BLOCKS_PER_PAGE;
365			spin_unlock(&sbinfo->stat_lock);
366		}
367
368		spin_unlock(&info->lock);
369		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
370		if (page)
371			set_page_private(page, 0);
372		spin_lock(&info->lock);
373
374		if (!page) {
375			shmem_free_blocks(inode, 1);
376			return ERR_PTR(-ENOMEM);
377		}
378		if (sgp != SGP_WRITE &&
379		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
380			entry = ERR_PTR(-EINVAL);
381			break;
382		}
383		if (info->next_index <= index)
384			info->next_index = index + 1;
385	}
386	if (page) {
387		/* another task gave its page, or truncated the file */
388		shmem_free_blocks(inode, 1);
389		shmem_dir_free(page);
390	}
391	if (info->next_index <= index && !IS_ERR(entry))
392		info->next_index = index + 1;
393	return entry;
394}
395
396/*
397 * shmem_free_swp - free some swap entries in a directory
398 *
399 * @dir:   pointer to the directory
400 * @edir:  pointer after last entry of the directory
401 */
402static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
403{
404	swp_entry_t *ptr;
405	int freed = 0;
406
407	for (ptr = dir; ptr < edir; ptr++) {
408		if (ptr->val) {
409			free_swap_and_cache(*ptr);
410			*ptr = (swp_entry_t){0};
411			freed++;
412		}
413	}
414	return freed;
415}
416
417static int shmem_map_and_free_swp(struct page *subdir,
418		int offset, int limit, struct page ***dir)
419{
420	swp_entry_t *ptr;
421	int freed = 0;
422
423	ptr = shmem_swp_map(subdir);
424	for (; offset < limit; offset += LATENCY_LIMIT) {
425		int size = limit - offset;
426		if (size > LATENCY_LIMIT)
427			size = LATENCY_LIMIT;
428		freed += shmem_free_swp(ptr+offset, ptr+offset+size);
429		if (need_resched()) {
430			shmem_swp_unmap(ptr);
431			if (*dir) {
432				shmem_dir_unmap(*dir);
433				*dir = NULL;
434			}
435			cond_resched();
436			ptr = shmem_swp_map(subdir);
437		}
438	}
439	shmem_swp_unmap(ptr);
440	return freed;
441}
442
443static void shmem_free_pages(struct list_head *next)
444{
445	struct page *page;
446	int freed = 0;
447
448	do {
449		page = container_of(next, struct page, lru);
450		next = next->next;
451		shmem_dir_free(page);
452		freed++;
453		if (freed >= LATENCY_LIMIT) {
454			cond_resched();
455			freed = 0;
456		}
457	} while (next);
458}
459
460static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
461{
462	struct shmem_inode_info *info = SHMEM_I(inode);
463	unsigned long idx;
464	unsigned long size;
465	unsigned long limit;
466	unsigned long stage;
467	unsigned long diroff;
468	struct page **dir;
469	struct page *topdir;
470	struct page *middir;
471	struct page *subdir;
472	swp_entry_t *ptr;
473	LIST_HEAD(pages_to_free);
474	long nr_pages_to_free = 0;
475	long nr_swaps_freed = 0;
476	int offset;
477	int freed;
478	int punch_hole = 0;
479
480	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
481	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
482	if (idx >= info->next_index)
483		return;
484
485	spin_lock(&info->lock);
486	info->flags |= SHMEM_TRUNCATE;
487	if (likely(end == (loff_t) -1)) {
488		limit = info->next_index;
489		info->next_index = idx;
490	} else {
491		limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
492		if (limit > info->next_index)
493			limit = info->next_index;
494		punch_hole = 1;
495	}
496
497	topdir = info->i_indirect;
498	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
499		info->i_indirect = NULL;
500		nr_pages_to_free++;
501		list_add(&topdir->lru, &pages_to_free);
502	}
503	spin_unlock(&info->lock);
504
505	if (info->swapped && idx < SHMEM_NR_DIRECT) {
506		ptr = info->i_direct;
507		size = limit;
508		if (size > SHMEM_NR_DIRECT)
509			size = SHMEM_NR_DIRECT;
510		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
511	}
512	if (!topdir)
513		goto done2;
514
515	BUG_ON(limit <= SHMEM_NR_DIRECT);
516	limit -= SHMEM_NR_DIRECT;
517	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
518	offset = idx % ENTRIES_PER_PAGE;
519	idx -= offset;
520
521	dir = shmem_dir_map(topdir);
522	stage = ENTRIES_PER_PAGEPAGE/2;
523	if (idx < ENTRIES_PER_PAGEPAGE/2) {
524		middir = topdir;
525		diroff = idx/ENTRIES_PER_PAGE;
526	} else {
527		dir += ENTRIES_PER_PAGE/2;
528		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
529		while (stage <= idx)
530			stage += ENTRIES_PER_PAGEPAGE;
531		middir = *dir;
532		if (*dir) {
533			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
534				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
535			if (!diroff && !offset) {
536				*dir = NULL;
537				nr_pages_to_free++;
538				list_add(&middir->lru, &pages_to_free);
539			}
540			shmem_dir_unmap(dir);
541			dir = shmem_dir_map(middir);
542		} else {
543			diroff = 0;
544			offset = 0;
545			idx = stage;
546		}
547	}
548
549	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
550		if (unlikely(idx == stage)) {
551			shmem_dir_unmap(dir);
552			dir = shmem_dir_map(topdir) +
553			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
554			while (!*dir) {
555				dir++;
556				idx += ENTRIES_PER_PAGEPAGE;
557				if (idx >= limit)
558					goto done1;
559			}
560			stage = idx + ENTRIES_PER_PAGEPAGE;
561			middir = *dir;
562			*dir = NULL;
563			nr_pages_to_free++;
564			list_add(&middir->lru, &pages_to_free);
565			shmem_dir_unmap(dir);
566			cond_resched();
567			dir = shmem_dir_map(middir);
568			diroff = 0;
569		}
570		subdir = dir[diroff];
571		if (subdir && page_private(subdir)) {
572			size = limit - idx;
573			if (size > ENTRIES_PER_PAGE)
574				size = ENTRIES_PER_PAGE;
575			freed = shmem_map_and_free_swp(subdir,
576						offset, size, &dir);
577			if (!dir)
578				dir = shmem_dir_map(middir);
579			nr_swaps_freed += freed;
580			if (offset)
581				spin_lock(&info->lock);
582			set_page_private(subdir, page_private(subdir) - freed);
583			if (offset)
584				spin_unlock(&info->lock);
585			if (!punch_hole)
586				BUG_ON(page_private(subdir) > offset);
587		}
588		if (offset)
589			offset = 0;
590		else if (subdir && !page_private(subdir)) {
591			dir[diroff] = NULL;
592			nr_pages_to_free++;
593			list_add(&subdir->lru, &pages_to_free);
594		}
595	}
596done1:
597	shmem_dir_unmap(dir);
598done2:
599	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
600		/*
601		 * Call truncate_inode_pages again: racing shmem_unuse_inode
602		 * may have swizzled a page in from swap since vmtruncate or
603		 * generic_delete_inode did it, before we lowered next_index.
604		 * Also, though shmem_getpage checks i_size before adding to
605		 * cache, no recheck after: so fix the narrow window there too.
606		 */
607		truncate_inode_pages_range(inode->i_mapping, start, end);
608	}
609
610	spin_lock(&info->lock);
611	info->flags &= ~SHMEM_TRUNCATE;
612	info->swapped -= nr_swaps_freed;
613	if (nr_pages_to_free)
614		shmem_free_blocks(inode, nr_pages_to_free);
615	shmem_recalc_inode(inode);
616	spin_unlock(&info->lock);
617
618	/*
619	 * Empty swap vector directory pages to be freed?
620	 */
621	if (!list_empty(&pages_to_free)) {
622		pages_to_free.prev->next = NULL;
623		shmem_free_pages(pages_to_free.next);
624	}
625}
626
627static void shmem_truncate(struct inode *inode)
628{
629	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
630}
631
632static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
633{
634	struct inode *inode = dentry->d_inode;
635	struct page *page = NULL;
636	int error;
637
638	if (attr->ia_valid & ATTR_SIZE) {
639		if (attr->ia_size < inode->i_size) {
640			/*
641			 * If truncating down to a partial page, then
642			 * if that page is already allocated, hold it
643			 * in memory until the truncation is over, so
644			 * truncate_partial_page cannot miss it were
645			 * it assigned to swap.
646			 */
647			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
648				(void) shmem_getpage(inode,
649					attr->ia_size>>PAGE_CACHE_SHIFT,
650						&page, SGP_READ, NULL);
651			}
652			/*
653			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
654			 * detect if any pages might have been added to cache
655			 * after truncate_inode_pages.  But we needn't bother
656			 * if it's being fully truncated to zero-length: the
657			 * nrpages check is efficient enough in that case.
658			 */
659			if (attr->ia_size) {
660				struct shmem_inode_info *info = SHMEM_I(inode);
661				spin_lock(&info->lock);
662				info->flags &= ~SHMEM_PAGEIN;
663				spin_unlock(&info->lock);
664			}
665		}
666	}
667
668	error = inode_change_ok(inode, attr);
669	if (!error)
670		error = inode_setattr(inode, attr);
671	if (page)
672		page_cache_release(page);
673	return error;
674}
675
676static void shmem_delete_inode(struct inode *inode)
677{
678	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
679	struct shmem_inode_info *info = SHMEM_I(inode);
680
681	if (inode->i_op->truncate == shmem_truncate) {
682		truncate_inode_pages(inode->i_mapping, 0);
683		shmem_unacct_size(info->flags, inode->i_size);
684		inode->i_size = 0;
685		shmem_truncate(inode);
686		if (!list_empty(&info->swaplist)) {
687			spin_lock(&shmem_swaplist_lock);
688			list_del_init(&info->swaplist);
689			spin_unlock(&shmem_swaplist_lock);
690		}
691	}
692	BUG_ON(inode->i_blocks);
693	if (sbinfo->max_inodes) {
694		spin_lock(&sbinfo->stat_lock);
695		sbinfo->free_inodes++;
696		spin_unlock(&sbinfo->stat_lock);
697	}
698	clear_inode(inode);
699}
700
701static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
702{
703	swp_entry_t *ptr;
704
705	for (ptr = dir; ptr < edir; ptr++) {
706		if (ptr->val == entry.val)
707			return ptr - dir;
708	}
709	return -1;
710}
711
712static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
713{
714	struct inode *inode;
715	unsigned long idx;
716	unsigned long size;
717	unsigned long limit;
718	unsigned long stage;
719	struct page **dir;
720	struct page *subdir;
721	swp_entry_t *ptr;
722	int offset;
723
724	idx = 0;
725	ptr = info->i_direct;
726	spin_lock(&info->lock);
727	limit = info->next_index;
728	size = limit;
729	if (size > SHMEM_NR_DIRECT)
730		size = SHMEM_NR_DIRECT;
731	offset = shmem_find_swp(entry, ptr, ptr+size);
732	if (offset >= 0) {
733		shmem_swp_balance_unmap();
734		goto found;
735	}
736	if (!info->i_indirect)
737		goto lost2;
738
739	dir = shmem_dir_map(info->i_indirect);
740	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
741
742	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
743		if (unlikely(idx == stage)) {
744			shmem_dir_unmap(dir-1);
745			dir = shmem_dir_map(info->i_indirect) +
746			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
747			while (!*dir) {
748				dir++;
749				idx += ENTRIES_PER_PAGEPAGE;
750				if (idx >= limit)
751					goto lost1;
752			}
753			stage = idx + ENTRIES_PER_PAGEPAGE;
754			subdir = *dir;
755			shmem_dir_unmap(dir);
756			dir = shmem_dir_map(subdir);
757		}
758		subdir = *dir;
759		if (subdir && page_private(subdir)) {
760			ptr = shmem_swp_map(subdir);
761			size = limit - idx;
762			if (size > ENTRIES_PER_PAGE)
763				size = ENTRIES_PER_PAGE;
764			offset = shmem_find_swp(entry, ptr, ptr+size);
765			if (offset >= 0) {
766				shmem_dir_unmap(dir);
767				goto found;
768			}
769			shmem_swp_unmap(ptr);
770		}
771	}
772lost1:
773	shmem_dir_unmap(dir-1);
774lost2:
775	spin_unlock(&info->lock);
776	return 0;
777found:
778	idx += offset;
779	inode = &info->vfs_inode;
780	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
781		info->flags |= SHMEM_PAGEIN;
782		shmem_swp_set(info, ptr + offset, 0);
783	}
784	shmem_swp_unmap(ptr);
785	spin_unlock(&info->lock);
786	/*
787	 * Decrement swap count even when the entry is left behind:
788	 * try_to_unuse will skip over mms, then reincrement count.
789	 */
790	swap_free(entry);
791	return 1;
792}
793
794/*
795 * shmem_unuse() searches for a swapped-out shmem page matching the given swap entry.
796 */
797int shmem_unuse(swp_entry_t entry, struct page *page)
798{
799	struct list_head *p, *next;
800	struct shmem_inode_info *info;
801	int found = 0;
802
803	spin_lock(&shmem_swaplist_lock);
804	list_for_each_safe(p, next, &shmem_swaplist) {
805		info = list_entry(p, struct shmem_inode_info, swaplist);
806		if (!info->swapped)
807			list_del_init(&info->swaplist);
808		else if (shmem_unuse_inode(info, entry, page)) {
809			/* move head to start search for next from here */
810			list_move_tail(&shmem_swaplist, &info->swaplist);
811			found = 1;
812			break;
813		}
814	}
815	spin_unlock(&shmem_swaplist_lock);
816	return found;
817}
818
819/*
820 * Move the page from the page cache to the swap cache.
821 */
822static int shmem_writepage(struct page *page, struct writeback_control *wbc)
823{
824	struct shmem_inode_info *info;
825	swp_entry_t *entry, swap;
826	struct address_space *mapping;
827	unsigned long index;
828	struct inode *inode;
829
830	BUG_ON(!PageLocked(page));
831	BUG_ON(page_mapped(page));
832
833	mapping = page->mapping;
834	index = page->index;
835	inode = mapping->host;
836	info = SHMEM_I(inode);
837	if (info->flags & VM_LOCKED)
838		goto redirty;
839	swap = get_swap_page();
840	if (!swap.val)
841		goto redirty;
842
843	spin_lock(&info->lock);
844	shmem_recalc_inode(inode);
845	if (index >= info->next_index) {
846		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
847		goto unlock;
848	}
849	entry = shmem_swp_entry(info, index, NULL);
850	BUG_ON(!entry);
851	BUG_ON(entry->val);
852
853	if (move_to_swap_cache(page, swap) == 0) {
854		shmem_swp_set(info, entry, swap.val);
855		shmem_swp_unmap(entry);
856		spin_unlock(&info->lock);
857		if (list_empty(&info->swaplist)) {
858			spin_lock(&shmem_swaplist_lock);
859			/* move instead of add in case we're racing */
860			list_move_tail(&info->swaplist, &shmem_swaplist);
861			spin_unlock(&shmem_swaplist_lock);
862		}
863		unlock_page(page);
864		return 0;
865	}
866
867	shmem_swp_unmap(entry);
868unlock:
869	spin_unlock(&info->lock);
870	swap_free(swap);
871redirty:
872	set_page_dirty(page);
873	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
874}
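/*
 * Sequence above, in short: take a swap slot first, then under info->lock
 * verify the page is still within next_index, point the swp entry at the
 * slot and move the page to the swap cache; on any failure the slot is
 * given back with swap_free and the page is redirtied for another pass.
 */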
875
876#ifdef CONFIG_NUMA
877static struct page *shmem_swapin_async(struct shared_policy *p,
878				       swp_entry_t entry, unsigned long idx)
879{
880	struct page *page;
881	struct vm_area_struct pvma;
882
883	/* Create a pseudo vma that just contains the policy */
884	memset(&pvma, 0, sizeof(struct vm_area_struct));
885	pvma.vm_end = PAGE_SIZE;
886	pvma.vm_pgoff = idx;
887	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
888	page = read_swap_cache_async(entry, &pvma, 0);
889	mpol_free(pvma.vm_policy);
890	return page;
891}
892
893struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
894			  unsigned long idx)
895{
896	struct shared_policy *p = &info->policy;
897	int i, num;
898	struct page *page;
899	unsigned long offset;
900
901	num = valid_swaphandles(entry, &offset);
902	for (i = 0; i < num; offset++, i++) {
903		page = shmem_swapin_async(p,
904				swp_entry(swp_type(entry), offset), idx);
905		if (!page)
906			break;
907		page_cache_release(page);
908	}
909	lru_add_drain();	/* Push any new pages onto the LRU now */
910	return shmem_swapin_async(p, entry, idx);
911}
912
913static struct page *
914shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
915		 unsigned long idx)
916{
917	struct vm_area_struct pvma;
918	struct page *page;
919
920	memset(&pvma, 0, sizeof(struct vm_area_struct));
921	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
922	pvma.vm_pgoff = idx;
923	pvma.vm_end = PAGE_SIZE;
924	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
925	mpol_free(pvma.vm_policy);
926	return page;
927}
928#else
929static inline struct page *
930shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
931{
932	swapin_readahead(entry, 0, NULL);
933	return read_swap_cache_async(entry, NULL, 0);
934}
935
936static inline struct page *
937shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
938{
939	return alloc_page(gfp | __GFP_ZERO);
940}
941#endif
942
943/*
944 * shmem_getpage - either get the page from swap or allocate a new one
945 *
946 * If we allocate a new one we do not mark it dirty. That's up to the
947 * vm. If we swap it in we mark it dirty, because we also free the swap
948 * entry: a page cannot live in both the swap cache and the page cache.
949 */
950static int shmem_getpage(struct inode *inode, unsigned long idx,
951			struct page **pagep, enum sgp_type sgp, int *type)
952{
953	struct address_space *mapping = inode->i_mapping;
954	struct shmem_inode_info *info = SHMEM_I(inode);
955	struct shmem_sb_info *sbinfo;
956	struct page *filepage = *pagep;
957	struct page *swappage;
958	swp_entry_t *entry;
959	swp_entry_t swap;
960	int error;
961
962	if (idx >= SHMEM_MAX_INDEX)
963		return -EFBIG;
964	/*
965	 * Normally, filepage is NULL on entry, and either found
966	 * uptodate immediately, or allocated and zeroed, or read
967	 * in under swappage, which is then assigned to filepage.
968	 * But shmem_prepare_write passes in a locked filepage,
969	 * which may be found not uptodate by other callers too,
970	 * and may need to be copied from the swappage read in.
971	 */
972repeat:
973	if (!filepage)
974		filepage = find_lock_page(mapping, idx);
975	if (filepage && PageUptodate(filepage))
976		goto done;
977	error = 0;
978	if (sgp == SGP_QUICK)
979		goto failed;
980
981	spin_lock(&info->lock);
982	shmem_recalc_inode(inode);
983	entry = shmem_swp_alloc(info, idx, sgp);
984	if (IS_ERR(entry)) {
985		spin_unlock(&info->lock);
986		error = PTR_ERR(entry);
987		goto failed;
988	}
989	swap = *entry;
990
991	if (swap.val) {
992		/* Look it up and read it in.. */
993		swappage = lookup_swap_cache(swap);
994		if (!swappage) {
995			shmem_swp_unmap(entry);
996			spin_unlock(&info->lock);
997			/* here we actually do the io */
998			if (type && *type == VM_FAULT_MINOR) {
999				inc_page_state(pgmajfault);
1000				*type = VM_FAULT_MAJOR;
1001			}
1002			swappage = shmem_swapin(info, swap, idx);
1003			if (!swappage) {
1004				spin_lock(&info->lock);
1005				entry = shmem_swp_alloc(info, idx, sgp);
1006				if (IS_ERR(entry))
1007					error = PTR_ERR(entry);
1008				else {
1009					if (entry->val == swap.val)
1010						error = -ENOMEM;
1011					shmem_swp_unmap(entry);
1012				}
1013				spin_unlock(&info->lock);
1014				if (error)
1015					goto failed;
1016				goto repeat;
1017			}
1018			wait_on_page_locked(swappage);
1019			page_cache_release(swappage);
1020			goto repeat;
1021		}
1022
1023		/* We have to do this with page locked to prevent races */
1024		if (TestSetPageLocked(swappage)) {
1025			shmem_swp_unmap(entry);
1026			spin_unlock(&info->lock);
1027			wait_on_page_locked(swappage);
1028			page_cache_release(swappage);
1029			goto repeat;
1030		}
1031		if (PageWriteback(swappage)) {
1032			shmem_swp_unmap(entry);
1033			spin_unlock(&info->lock);
1034			wait_on_page_writeback(swappage);
1035			unlock_page(swappage);
1036			page_cache_release(swappage);
1037			goto repeat;
1038		}
1039		if (!PageUptodate(swappage)) {
1040			shmem_swp_unmap(entry);
1041			spin_unlock(&info->lock);
1042			unlock_page(swappage);
1043			page_cache_release(swappage);
1044			error = -EIO;
1045			goto failed;
1046		}
1047
1048		if (filepage) {
1049			shmem_swp_set(info, entry, 0);
1050			shmem_swp_unmap(entry);
1051			delete_from_swap_cache(swappage);
1052			spin_unlock(&info->lock);
1053			copy_highpage(filepage, swappage);
1054			unlock_page(swappage);
1055			page_cache_release(swappage);
1056			flush_dcache_page(filepage);
1057			SetPageUptodate(filepage);
1058			set_page_dirty(filepage);
1059			swap_free(swap);
1060		} else if (!(error = move_from_swap_cache(
1061				swappage, idx, mapping))) {
1062			info->flags |= SHMEM_PAGEIN;
1063			shmem_swp_set(info, entry, 0);
1064			shmem_swp_unmap(entry);
1065			spin_unlock(&info->lock);
1066			filepage = swappage;
1067			swap_free(swap);
1068		} else {
1069			shmem_swp_unmap(entry);
1070			spin_unlock(&info->lock);
1071			unlock_page(swappage);
1072			page_cache_release(swappage);
1073			if (error == -ENOMEM) {
1074				/* let kswapd refresh zone for GFP_ATOMICs */
1075				blk_congestion_wait(WRITE, HZ/50);
1076			}
1077			goto repeat;
1078		}
1079	} else if (sgp == SGP_READ && !filepage) {
1080		shmem_swp_unmap(entry);
1081		filepage = find_get_page(mapping, idx);
1082		if (filepage &&
1083		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
1084			spin_unlock(&info->lock);
1085			wait_on_page_locked(filepage);
1086			page_cache_release(filepage);
1087			filepage = NULL;
1088			goto repeat;
1089		}
1090		spin_unlock(&info->lock);
1091	} else {
1092		shmem_swp_unmap(entry);
1093		sbinfo = SHMEM_SB(inode->i_sb);
1094		if (sbinfo->max_blocks) {
1095			spin_lock(&sbinfo->stat_lock);
1096			if (sbinfo->free_blocks == 0 ||
1097			    shmem_acct_block(info->flags)) {
1098				spin_unlock(&sbinfo->stat_lock);
1099				spin_unlock(&info->lock);
1100				error = -ENOSPC;
1101				goto failed;
1102			}
1103			sbinfo->free_blocks--;
1104			inode->i_blocks += BLOCKS_PER_PAGE;
1105			spin_unlock(&sbinfo->stat_lock);
1106		} else if (shmem_acct_block(info->flags)) {
1107			spin_unlock(&info->lock);
1108			error = -ENOSPC;
1109			goto failed;
1110		}
1111
1112		if (!filepage) {
1113			spin_unlock(&info->lock);
1114			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
1115						    info,
1116						    idx);
1117			if (!filepage) {
1118				shmem_unacct_blocks(info->flags, 1);
1119				shmem_free_blocks(inode, 1);
1120				error = -ENOMEM;
1121				goto failed;
1122			}
1123
1124			spin_lock(&info->lock);
1125			entry = shmem_swp_alloc(info, idx, sgp);
1126			if (IS_ERR(entry))
1127				error = PTR_ERR(entry);
1128			else {
1129				swap = *entry;
1130				shmem_swp_unmap(entry);
1131			}
1132			if (error || swap.val || 0 != add_to_page_cache_lru(
1133					filepage, mapping, idx, GFP_ATOMIC)) {
1134				spin_unlock(&info->lock);
1135				page_cache_release(filepage);
1136				shmem_unacct_blocks(info->flags, 1);
1137				shmem_free_blocks(inode, 1);
1138				filepage = NULL;
1139				if (error)
1140					goto failed;
1141				goto repeat;
1142			}
1143			info->flags |= SHMEM_PAGEIN;
1144		}
1145
1146		info->alloced++;
1147		spin_unlock(&info->lock);
1148		flush_dcache_page(filepage);
1149		SetPageUptodate(filepage);
1150	}
1151done:
1152	if (*pagep != filepage) {
1153		unlock_page(filepage);
1154		*pagep = filepage;
1155	}
1156	return 0;
1157
1158failed:
1159	if (*pagep != filepage) {
1160		unlock_page(filepage);
1161		page_cache_release(filepage);
1162	}
1163	return error;
1164}
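/*
 * The shape of the function above: info->lock is held whenever the swp
 * entry is consulted or set, but dropped around anything that may sleep
 * (swapin I/O, page allocation, waiting on page locks); after retaking
 * the lock nothing is assumed still valid, hence the goto repeat, so every
 * decision is remade under the same lock a racing truncate or writepage
 * must take.
 */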
1165
1166struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
1167{
1168	struct inode *inode = vma->vm_file->f_dentry->d_inode;
1169	struct page *page = NULL;
1170	unsigned long idx;
1171	int error;
1172
1173	idx = (address - vma->vm_start) >> PAGE_SHIFT;
1174	idx += vma->vm_pgoff;
1175	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
1176	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
1177		return NOPAGE_SIGBUS;
1178
1179	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
1180	if (error)
1181		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
1182
1183	mark_page_accessed(page);
1184	return page;
1185}
1186
1187static int shmem_populate(struct vm_area_struct *vma,
1188	unsigned long addr, unsigned long len,
1189	pgprot_t prot, unsigned long pgoff, int nonblock)
1190{
1191	struct inode *inode = vma->vm_file->f_dentry->d_inode;
1192	struct mm_struct *mm = vma->vm_mm;
1193	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
1194	unsigned long size;
1195
1196	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1197	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
1198		return -EINVAL;
1199
1200	while ((long) len > 0) {
1201		struct page *page = NULL;
1202		int err;
1203		/*
1204		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
1205		 */
1206		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
1207		if (err)
1208			return err;
1209		/* Page may still be null, but only if nonblock was set. */
1210		if (page) {
1211			mark_page_accessed(page);
1212			err = install_page(mm, vma, addr, page, prot);
1213			if (err) {
1214				page_cache_release(page);
1215				return err;
1216			}
1217		} else if (vma->vm_flags & VM_NONLINEAR) {
1218			/* No page was found just because we can't read it in
1219			 * now (being here implies nonblock != 0), but the page
1220			 * may exist, so set the PTE to fault it in later. */
1221			err = install_file_pte(mm, vma, addr, pgoff, prot);
1222			if (err)
1223				return err;
1224		}
1225
1226		len -= PAGE_SIZE;
1227		addr += PAGE_SIZE;
1228		pgoff++;
1229	}
1230	return 0;
1231}
1232
1233#ifdef CONFIG_NUMA
1234int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
1235{
1236	struct inode *i = vma->vm_file->f_dentry->d_inode;
1237	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
1238}
1239
1240struct mempolicy *
1241shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
1242{
1243	struct inode *i = vma->vm_file->f_dentry->d_inode;
1244	unsigned long idx;
1245
1246	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1247	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
1248}
1249#endif
1250
1251int shmem_lock(struct file *file, int lock, struct user_struct *user)
1252{
1253	struct inode *inode = file->f_dentry->d_inode;
1254	struct shmem_inode_info *info = SHMEM_I(inode);
1255	int retval = -ENOMEM;
1256
1257	spin_lock(&info->lock);
1258	if (lock && !(info->flags & VM_LOCKED)) {
1259		if (!user_shm_lock(inode->i_size, user))
1260			goto out_nomem;
1261		info->flags |= VM_LOCKED;
1262	}
1263	if (!lock && (info->flags & VM_LOCKED) && user) {
1264		user_shm_unlock(inode->i_size, user);
1265		info->flags &= ~VM_LOCKED;
1266	}
1267	retval = 0;
1268out_nomem:
1269	spin_unlock(&info->lock);
1270	return retval;
1271}
1272
1273int shmem_mmap(struct file *file, struct vm_area_struct *vma)
1274{
1275	file_accessed(file);
1276	vma->vm_ops = &shmem_vm_ops;
1277	return 0;
1278}
1279
1280static struct inode *
1281shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
1282{
1283	struct inode *inode;
1284	struct shmem_inode_info *info;
1285	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1286
1287	if (sbinfo->max_inodes) {
1288		spin_lock(&sbinfo->stat_lock);
1289		if (!sbinfo->free_inodes) {
1290			spin_unlock(&sbinfo->stat_lock);
1291			return NULL;
1292		}
1293		sbinfo->free_inodes--;
1294		spin_unlock(&sbinfo->stat_lock);
1295	}
1296
1297	inode = new_inode(sb);
1298	if (inode) {
1299		inode->i_mode = mode;
1300		inode->i_uid = current->fsuid;
1301		inode->i_gid = current->fsgid;
1302		inode->i_blksize = PAGE_CACHE_SIZE;
1303		inode->i_blocks = 0;
1304		inode->i_mapping->a_ops = &shmem_aops;
1305		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
1306		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1307		info = SHMEM_I(inode);
1308		memset(info, 0, (char *)inode - (char *)info);
1309		spin_lock_init(&info->lock);
1310		INIT_LIST_HEAD(&info->swaplist);
1311
1312		switch (mode & S_IFMT) {
1313		default:
1314			init_special_inode(inode, mode, dev);
1315			break;
1316		case S_IFREG:
1317			inode->i_op = &shmem_inode_operations;
1318			inode->i_fop = &shmem_file_operations;
1319			mpol_shared_policy_init(&info->policy);
1320			break;
1321		case S_IFDIR:
1322			inode->i_nlink++;
1323			/* Some things misbehave if size == 0 on a directory */
1324			inode->i_size = 2 * BOGO_DIRENT_SIZE;
1325			inode->i_op = &shmem_dir_inode_operations;
1326			inode->i_fop = &simple_dir_operations;
1327			break;
1328		case S_IFLNK:
1329			/*
1330			 * Must not load anything in the rbtree,
1331			 * mpol_free_shared_policy will not be called.
1332			 */
1333			mpol_shared_policy_init(&info->policy);
1334			break;
1335		}
1336	} else if (sbinfo->max_inodes) {
1337		spin_lock(&sbinfo->stat_lock);
1338		sbinfo->free_inodes++;
1339		spin_unlock(&sbinfo->stat_lock);
1340	}
1341	return inode;
1342}
1343
1344#ifdef CONFIG_TMPFS
1345static struct inode_operations shmem_symlink_inode_operations;
1346static struct inode_operations shmem_symlink_inline_operations;
1347
1348/*
1349 * Normally tmpfs makes no use of shmem_prepare_write, but it
1350 * lets a tmpfs file be used read-write below the loop driver.
1351 */
1352static int
1353shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
1354{
1355	struct inode *inode = page->mapping->host;
1356	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
1357}
1358
1359static ssize_t
1360shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
1361{
1362	struct inode	*inode = file->f_dentry->d_inode;
1363	loff_t		pos;
1364	unsigned long	written;
1365	ssize_t		err;
1366
1367	if ((ssize_t) count < 0)
1368		return -EINVAL;
1369
1370	if (!access_ok(VERIFY_READ, buf, count))
1371		return -EFAULT;
1372
1373	mutex_lock(&inode->i_mutex);
1374
1375	pos = *ppos;
1376	written = 0;
1377
1378	err = generic_write_checks(file, &pos, &count, 0);
1379	if (err || !count)
1380		goto out;
1381
1382	err = remove_suid(file->f_dentry);
1383	if (err)
1384		goto out;
1385
1386	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1387
1388	do {
1389		struct page *page = NULL;
1390		unsigned long bytes, index, offset;
1391		char *kaddr;
1392		int left;
1393
1394		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
1395		index = pos >> PAGE_CACHE_SHIFT;
1396		bytes = PAGE_CACHE_SIZE - offset;
1397		if (bytes > count)
1398			bytes = count;
1399
1400		/*
1401		 * We don't hold page lock across copy from user -
1402		 * what would it guard against? - so no deadlock here.
1403		 * But it still may be a good idea to prefault below.
1404		 */
1405
1406		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
1407		if (err)
1408			break;
1409
1410		left = bytes;
1411		if (PageHighMem(page)) {
1412			volatile unsigned char dummy;
1413			__get_user(dummy, buf);
1414			__get_user(dummy, buf + bytes - 1);
1415
1416			kaddr = kmap_atomic(page, KM_USER0);
1417			left = __copy_from_user_inatomic(kaddr + offset,
1418							buf, bytes);
1419			kunmap_atomic(kaddr, KM_USER0);
1420		}
1421		if (left) {
1422			kaddr = kmap(page);
1423			left = __copy_from_user(kaddr + offset, buf, bytes);
1424			kunmap(page);
1425		}
1426
1427		written += bytes;
1428		count -= bytes;
1429		pos += bytes;
1430		buf += bytes;
1431		if (pos > inode->i_size)
1432			i_size_write(inode, pos);
1433
1434		flush_dcache_page(page);
1435		set_page_dirty(page);
1436		mark_page_accessed(page);
1437		page_cache_release(page);
1438
1439		if (left) {
1440			pos -= left;
1441			written -= left;
1442			err = -EFAULT;
1443			break;
1444		}
1445
1446		/*
1447		 * Our dirty pages are not counted in nr_dirty,
1448		 * and we do not attempt to balance dirty pages.
1449		 */
1450
1451		cond_resched();
1452	} while (count);
1453
1454	*ppos = pos;
1455	if (written)
1456		err = written;
1457out:
1458	mutex_unlock(&inode->i_mutex);
1459	return err;
1460}
1461
1462static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1463{
1464	struct inode *inode = filp->f_dentry->d_inode;
1465	struct address_space *mapping = inode->i_mapping;
1466	unsigned long index, offset;
1467
1468	index = *ppos >> PAGE_CACHE_SHIFT;
1469	offset = *ppos & ~PAGE_CACHE_MASK;
1470
1471	for (;;) {
1472		struct page *page = NULL;
1473		unsigned long end_index, nr, ret;
1474		loff_t i_size = i_size_read(inode);
1475
1476		end_index = i_size >> PAGE_CACHE_SHIFT;
1477		if (index > end_index)
1478			break;
1479		if (index == end_index) {
1480			nr = i_size & ~PAGE_CACHE_MASK;
1481			if (nr <= offset)
1482				break;
1483		}
1484
1485		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
1486		if (desc->error) {
1487			if (desc->error == -EINVAL)
1488				desc->error = 0;
1489			break;
1490		}
1491
1492		/*
1493		 * We must evaluate after, since reads (unlike writes)
1494		 * are called without i_mutex protection against truncate
1495		 */
1496		nr = PAGE_CACHE_SIZE;
1497		i_size = i_size_read(inode);
1498		end_index = i_size >> PAGE_CACHE_SHIFT;
1499		if (index == end_index) {
1500			nr = i_size & ~PAGE_CACHE_MASK;
1501			if (nr <= offset) {
1502				if (page)
1503					page_cache_release(page);
1504				break;
1505			}
1506		}
1507		nr -= offset;
1508
1509		if (page) {
1510			/*
1511			 * If users can be writing to this page using arbitrary
1512			 * virtual addresses, take care about potential aliasing
1513			 * before reading the page on the kernel side.
1514			 */
1515			if (mapping_writably_mapped(mapping))
1516				flush_dcache_page(page);
1517			/*
1518			 * Mark the page accessed if we read the beginning.
1519			 */
1520			if (!offset)
1521				mark_page_accessed(page);
1522		} else {
1523			page = ZERO_PAGE(0);
1524			page_cache_get(page);
1525		}
1526
1527		/*
1528		 * Ok, we have the page, and it's up-to-date, so
1529		 * now we can copy it to user space...
1530		 *
1531		 * The actor routine returns how many bytes were actually used..
1532		 * NOTE! This may not be the same as how much of a user buffer
1533		 * we filled up (we may be padding etc), so we can only update
1534		 * "pos" here (the actor routine has to update the user buffer
1535		 * pointers and the remaining count).
1536		 */
1537		ret = actor(desc, page, offset, nr);
1538		offset += ret;
1539		index += offset >> PAGE_CACHE_SHIFT;
1540		offset &= ~PAGE_CACHE_MASK;
1541
1542		page_cache_release(page);
1543		if (ret != nr || !desc->count)
1544			break;
1545
1546		cond_resched();
1547	}
1548
1549	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1550	file_accessed(filp);
1551}
1552
1553static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
1554{
1555	read_descriptor_t desc;
1556
1557	if ((ssize_t) count < 0)
1558		return -EINVAL;
1559	if (!access_ok(VERIFY_WRITE, buf, count))
1560		return -EFAULT;
1561	if (!count)
1562		return 0;
1563
1564	desc.written = 0;
1565	desc.count = count;
1566	desc.arg.buf = buf;
1567	desc.error = 0;
1568
1569	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1570	if (desc.written)
1571		return desc.written;
1572	return desc.error;
1573}
1574
1575static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
1576			 size_t count, read_actor_t actor, void *target)
1577{
1578	read_descriptor_t desc;
1579
1580	if (!count)
1581		return 0;
1582
1583	desc.written = 0;
1584	desc.count = count;
1585	desc.arg.data = target;
1586	desc.error = 0;
1587
1588	do_shmem_file_read(in_file, ppos, &desc, actor);
1589	if (desc.written)
1590		return desc.written;
1591	return desc.error;
1592}
1593
1594static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
1595{
1596	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1597
1598	buf->f_type = TMPFS_MAGIC;
1599	buf->f_bsize = PAGE_CACHE_SIZE;
1600	buf->f_namelen = NAME_MAX;
1601	spin_lock(&sbinfo->stat_lock);
1602	if (sbinfo->max_blocks) {
1603		buf->f_blocks = sbinfo->max_blocks;
1604		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
1605	}
1606	if (sbinfo->max_inodes) {
1607		buf->f_files = sbinfo->max_inodes;
1608		buf->f_ffree = sbinfo->free_inodes;
1609	}
1610	/* else leave those fields 0 like simple_statfs */
1611	spin_unlock(&sbinfo->stat_lock);
1612	return 0;
1613}
1614
1615/*
1616 * File creation. Allocate an inode, and we're done..
1617 */
1618static int
1619shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1620{
1621	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
1622	int error = -ENOSPC;
1623
1624	if (inode) {
1625		error = security_inode_init_security(inode, dir, NULL, NULL,
1626						     NULL);
1627		if (error) {
1628			if (error != -EOPNOTSUPP) {
1629				iput(inode);
1630				return error;
1631			}
1632			error = 0;
1633		}
1634		if (dir->i_mode & S_ISGID) {
1635			inode->i_gid = dir->i_gid;
1636			if (S_ISDIR(mode))
1637				inode->i_mode |= S_ISGID;
1638		}
1639		dir->i_size += BOGO_DIRENT_SIZE;
1640		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1641		d_instantiate(dentry, inode);
1642		dget(dentry); /* Extra count - pin the dentry in core */
1643	}
1644	return error;
1645}
1646
1647static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1648{
1649	int error;
1650
1651	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1652		return error;
1653	dir->i_nlink++;
1654	return 0;
1655}
1656
1657static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1658		struct nameidata *nd)
1659{
1660	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1661}
1662
1663/*
1664 * Link a file..
1665 */
1666static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1667{
1668	struct inode *inode = old_dentry->d_inode;
1669	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1670
1671	/*
1672	 * No ordinary (disk based) filesystem counts links as inodes;
1673	 * but each new link needs a new dentry, pinning lowmem, and
1674	 * tmpfs dentries cannot be pruned until they are unlinked.
1675	 */
1676	if (sbinfo->max_inodes) {
1677		spin_lock(&sbinfo->stat_lock);
1678		if (!sbinfo->free_inodes) {
1679			spin_unlock(&sbinfo->stat_lock);
1680			return -ENOSPC;
1681		}
1682		sbinfo->free_inodes--;
1683		spin_unlock(&sbinfo->stat_lock);
1684	}
1685
1686	dir->i_size += BOGO_DIRENT_SIZE;
1687	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1688	inode->i_nlink++;
1689	atomic_inc(&inode->i_count);	/* New dentry reference */
1690	dget(dentry);		/* Extra pinning count for the created dentry */
1691	d_instantiate(dentry, inode);
1692	return 0;
1693}
1694
1695static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1696{
1697	struct inode *inode = dentry->d_inode;
1698
1699	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
1700		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1701		if (sbinfo->max_inodes) {
1702			spin_lock(&sbinfo->stat_lock);
1703			sbinfo->free_inodes++;
1704			spin_unlock(&sbinfo->stat_lock);
1705		}
1706	}
1707
1708	dir->i_size -= BOGO_DIRENT_SIZE;
1709	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1710	inode->i_nlink--;
1711	dput(dentry);	/* Undo the count from "create" - this does all the work */
1712	return 0;
1713}
1714
1715static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1716{
1717	if (!simple_empty(dentry))
1718		return -ENOTEMPTY;
1719
1720	dir->i_nlink--;
1721	return shmem_unlink(dir, dentry);
1722}
1723
1724/*
1725 * The VFS layer already does all the dentry stuff for rename,
1726 * we just have to decrement the usage count for the target if
1727 * it exists so that the VFS layer correctly frees it when it
1728 * gets overwritten.
1729 */
1730static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
1731{
1732	struct inode *inode = old_dentry->d_inode;
1733	int they_are_dirs = S_ISDIR(inode->i_mode);
1734
1735	if (!simple_empty(new_dentry))
1736		return -ENOTEMPTY;
1737
1738	if (new_dentry->d_inode) {
1739		(void) shmem_unlink(new_dir, new_dentry);
1740		if (they_are_dirs)
1741			old_dir->i_nlink--;
1742	} else if (they_are_dirs) {
1743		old_dir->i_nlink--;
1744		new_dir->i_nlink++;
1745	}
1746
1747	old_dir->i_size -= BOGO_DIRENT_SIZE;
1748	new_dir->i_size += BOGO_DIRENT_SIZE;
1749	old_dir->i_ctime = old_dir->i_mtime =
1750	new_dir->i_ctime = new_dir->i_mtime =
1751	inode->i_ctime = CURRENT_TIME;
1752	return 0;
1753}
1754
1755static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
1756{
1757	int error;
1758	int len;
1759	struct inode *inode;
1760	struct page *page = NULL;
1761	char *kaddr;
1762	struct shmem_inode_info *info;
1763
1764	len = strlen(symname) + 1;
1765	if (len > PAGE_CACHE_SIZE)
1766		return -ENAMETOOLONG;
1767
1768	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
1769	if (!inode)
1770		return -ENOSPC;
1771
1772	error = security_inode_init_security(inode, dir, NULL, NULL,
1773					     NULL);
1774	if (error) {
1775		if (error != -EOPNOTSUPP) {
1776			iput(inode);
1777			return error;
1778		}
1779		error = 0;
1780	}
1781
1782	info = SHMEM_I(inode);
1783	inode->i_size = len-1;
1784	if (len <= (char *)inode - (char *)info) {
1785		/* do it inline */
1786		memcpy(info, symname, len);
1787		inode->i_op = &shmem_symlink_inline_operations;
1788	} else {
1789		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
1790		if (error) {
1791			iput(inode);
1792			return error;
1793		}
1794		inode->i_op = &shmem_symlink_inode_operations;
1795		kaddr = kmap_atomic(page, KM_USER0);
1796		memcpy(kaddr, symname, len);
1797		kunmap_atomic(kaddr, KM_USER0);
1798		set_page_dirty(page);
1799		page_cache_release(page);
1800	}
1801	if (dir->i_mode & S_ISGID)
1802		inode->i_gid = dir->i_gid;
1803	dir->i_size += BOGO_DIRENT_SIZE;
1804	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1805	d_instantiate(dentry, inode);
1806	dget(dentry);
1807	return 0;
1808}
1809
1810static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1811{
1812	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
1813	return NULL;
1814}
1815
1816static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1817{
1818	struct page *page = NULL;
1819	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
1820	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
1821	return page;
1822}
1823
1824static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1825{
1826	if (!IS_ERR(nd_get_link(nd))) {
1827		struct page *page = cookie;
1828		kunmap(page);
1829		mark_page_accessed(page);
1830		page_cache_release(page);
1831	}
1832}
1833
1834static struct inode_operations shmem_symlink_inline_operations = {
1835	.readlink	= generic_readlink,
1836	.follow_link	= shmem_follow_link_inline,
1837};
1838
1839static struct inode_operations shmem_symlink_inode_operations = {
1840	.truncate	= shmem_truncate,
1841	.readlink	= generic_readlink,
1842	.follow_link	= shmem_follow_link,
1843	.put_link	= shmem_put_link,
1844};
1845
1846static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
1847{
1848	char *this_char, *value, *rest;
1849
1850	while ((this_char = strsep(&options, ",")) != NULL) {
1851		if (!*this_char)
1852			continue;
1853		if ((value = strchr(this_char,'=')) != NULL) {
1854			*value++ = 0;
1855		} else {
1856			printk(KERN_ERR
1857			    "tmpfs: No value for mount option '%s'\n",
1858			    this_char);
1859			return 1;
1860		}
1861
1862		if (!strcmp(this_char,"size")) {
1863			unsigned long long size;
1864			size = memparse(value,&rest);
1865			if (*rest == '%') {
1866				size <<= PAGE_SHIFT;
1867				size *= totalram_pages;
1868				do_div(size, 100);
1869				rest++;
1870			}
1871			if (*rest)
1872				goto bad_val;
1873			*blocks = size >> PAGE_CACHE_SHIFT;
1874		} else if (!strcmp(this_char,"nr_blocks")) {
1875			*blocks = memparse(value,&rest);
1876			if (*rest)
1877				goto bad_val;
1878		} else if (!strcmp(this_char,"nr_inodes")) {
1879			*inodes = memparse(value,&rest);
1880			if (*rest)
1881				goto bad_val;
1882		} else if (!strcmp(this_char,"mode")) {
1883			if (!mode)
1884				continue;
1885			*mode = simple_strtoul(value,&rest,8);
1886			if (*rest)
1887				goto bad_val;
1888		} else if (!strcmp(this_char,"uid")) {
1889			if (!uid)
1890				continue;
1891			*uid = simple_strtoul(value,&rest,0);
1892			if (*rest)
1893				goto bad_val;
1894		} else if (!strcmp(this_char,"gid")) {
1895			if (!gid)
1896				continue;
1897			*gid = simple_strtoul(value,&rest,0);
1898			if (*rest)
1899				goto bad_val;
1900		} else {
1901			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
1902			       this_char);
1903			return 1;
1904		}
1905	}
1906	return 0;
1907
1908bad_val:
1909	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
1910	       value, this_char);
1911	return 1;
1912
1913}
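/*
 * Example mount using these options (illustrative command, not from this
 * file):
 *
 *   mount -t tmpfs -o size=512m,nr_inodes=10k,mode=1777 tmpfs /tmp
 *
 * memparse accepts k/m/g suffixes, so with 4K pages size=512m becomes
 * 131072 blocks; size=50% instead scales totalram_pages by 50/100.
 */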
1914
1915static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
1916{
1917	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
1918	unsigned long max_blocks = sbinfo->max_blocks;
1919	unsigned long max_inodes = sbinfo->max_inodes;
1920	unsigned long blocks;
1921	unsigned long inodes;
1922	int error = -EINVAL;
1923
1924	if (shmem_parse_options(data, NULL, NULL, NULL,
1925				&max_blocks, &max_inodes))
1926		return error;
1927
1928	spin_lock(&sbinfo->stat_lock);
1929	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
1930	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
1931	if (max_blocks < blocks)
1932		goto out;
1933	if (max_inodes < inodes)
1934		goto out;
1935	/*
1936	 * Those tests also disallow limited->unlimited while any are in
1937	 * use, so i_blocks will always be zero when max_blocks is zero;
1938	 * but we must separately disallow unlimited->limited, because
1939	 * in that case we have no record of how much is already in use.
1940	 */
1941	if (max_blocks && !sbinfo->max_blocks)
1942		goto out;
1943	if (max_inodes && !sbinfo->max_inodes)
1944		goto out;
1945
1946	error = 0;
1947	sbinfo->max_blocks  = max_blocks;
1948	sbinfo->free_blocks = max_blocks - blocks;
1949	sbinfo->max_inodes  = max_inodes;
1950	sbinfo->free_inodes = max_inodes - inodes;
1951out:
1952	spin_unlock(&sbinfo->stat_lock);
1953	return error;
1954}
1955#endif
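/*
 * Editor's note (hedged): a remount such as
 *
 *	mount -o remount,size=2g /mnt
 *
 * re-parses only the limits (mode/uid/gid are passed as NULL above) and
 * returns -EINVAL if a new limit falls below what is already in use, or
 * if it would turn a previously unlimited (0) limit into a finite one,
 * since nothing was accounted while the instance was unlimited.
 */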
1956
1957static void shmem_put_super(struct super_block *sb)
1958{
1959	kfree(sb->s_fs_info);
1960	sb->s_fs_info = NULL;
1961}
1962
1963static int shmem_fill_super(struct super_block *sb,
1964			    void *data, int silent)
1965{
1966	struct inode *inode;
1967	struct dentry *root;
1968	int mode   = S_IRWXUGO | S_ISVTX;
1969	uid_t uid = current->fsuid;
1970	gid_t gid = current->fsgid;
1971	int err = -ENOMEM;
1972	struct shmem_sb_info *sbinfo;
1973	unsigned long blocks = 0;
1974	unsigned long inodes = 0;
1975
1976#ifdef CONFIG_TMPFS
1977	/*
1978	 * Per default we only allow half of the physical ram per
1979	 * By default we allow only half of the physical RAM per
1980	 * but the internal instance is left unlimited.
1981	 */
1982	if (!(sb->s_flags & MS_NOUSER)) {
1983		blocks = totalram_pages / 2;
1984		inodes = totalram_pages - totalhigh_pages;
1985		if (inodes > blocks)
1986			inodes = blocks;
1987		if (shmem_parse_options(data, &mode, &uid, &gid,
1988					&blocks, &inodes))
1989			return -EINVAL;
1990	}
1991#else
1992	sb->s_flags |= MS_NOUSER;
1993#endif
1994
1995	/* Round up to L1_CACHE_BYTES to resist false sharing */
1996	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
1997				L1_CACHE_BYTES), GFP_KERNEL);
1998	if (!sbinfo)
1999		return -ENOMEM;
2000
2001	spin_lock_init(&sbinfo->stat_lock);
2002	sbinfo->max_blocks = blocks;
2003	sbinfo->free_blocks = blocks;
2004	sbinfo->max_inodes = inodes;
2005	sbinfo->free_inodes = inodes;
2006
2007	sb->s_fs_info = sbinfo;
2008	sb->s_maxbytes = SHMEM_MAX_BYTES;
2009	sb->s_blocksize = PAGE_CACHE_SIZE;
2010	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2011	sb->s_magic = TMPFS_MAGIC;
2012	sb->s_op = &shmem_ops;
2013
2014	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
2015	if (!inode)
2016		goto failed;
2017	inode->i_uid = uid;
2018	inode->i_gid = gid;
2019	root = d_alloc_root(inode);
2020	if (!root)
2021		goto failed_iput;
2022	sb->s_root = root;
2023	return 0;
2024
2025failed_iput:
2026	iput(inode);
2027failed:
2028	shmem_put_super(sb);
2029	return err;
2030}
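/*
 * Editor's worked example (assumes 4kB pages and no highmem): on a
 * 512MB machine totalram_pages is 131072, so an ordinary user mount
 * defaults to blocks = 65536 (a 256MB instance) and inodes =
 * min(131072, 65536) = 65536.  The MS_NOUSER internal mount skips that
 * block entirely and keeps the unlimited 0/0 defaults.
 */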
2031
2032static kmem_cache_t *shmem_inode_cachep;
2033
2034static struct inode *shmem_alloc_inode(struct super_block *sb)
2035{
2036	struct shmem_inode_info *p;
2037	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
2038	if (!p)
2039		return NULL;
2040	return &p->vfs_inode;
2041}
2042
2043static void shmem_destroy_inode(struct inode *inode)
2044{
2045	if ((inode->i_mode & S_IFMT) == S_IFREG) {
2046		/* inline symlink text overlays the info: only struct inode is valid then */
2047		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2048	}
2049	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2050}
2051
2052static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
2053{
2054	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2055
2056	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2057	    SLAB_CTOR_CONSTRUCTOR) {
2058		inode_init_once(&p->vfs_inode);
2059	}
2060}
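/*
 * Editor's note: the flag test above makes inode_init_once() run only
 * when the slab allocator constructs a fresh object (and not during
 * SLAB_CTOR_VERIFY debug passes), so shmem_alloc_inode() hands back a
 * vfs_inode that is already initialized.
 */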
2061
2062static int init_inodecache(void)
2063{
2064	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2065				sizeof(struct shmem_inode_info),
2066				0, 0, init_once, NULL);
2067	if (shmem_inode_cachep == NULL)
2068		return -ENOMEM;
2069	return 0;
2070}
2071
2072static void destroy_inodecache(void)
2073{
2074	if (kmem_cache_destroy(shmem_inode_cachep))
2075		printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
2076}
2077
2078static struct address_space_operations shmem_aops = {
2079	.writepage	= shmem_writepage,
2080	.set_page_dirty	= __set_page_dirty_nobuffers,
2081#ifdef CONFIG_TMPFS
2082	.prepare_write	= shmem_prepare_write,
2083	.commit_write	= simple_commit_write,
2084#endif
2085};
2086
2087static struct file_operations shmem_file_operations = {
2088	.mmap		= shmem_mmap,
2089#ifdef CONFIG_TMPFS
2090	.llseek		= generic_file_llseek,
2091	.read		= shmem_file_read,
2092	.write		= shmem_file_write,
2093	.fsync		= simple_sync_file,
2094	.sendfile	= shmem_file_sendfile,
2095#endif
2096};
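/*
 * Editor's note: with CONFIG_TMPFS=n only ->mmap survives, which is all
 * the internal MS_NOUSER mount needs to back shared anonymous mappings;
 * the llseek/read/write/fsync/sendfile file interface exists only for a
 * user-visible tmpfs.
 */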
2097
2098static struct inode_operations shmem_inode_operations = {
2099	.truncate	= shmem_truncate,
2100	.setattr	= shmem_notify_change,
2101	.truncate_range	= shmem_truncate_range,
2102};
2103
2104static struct inode_operations shmem_dir_inode_operations = {
2105#ifdef CONFIG_TMPFS
2106	.create		= shmem_create,
2107	.lookup		= simple_lookup,
2108	.link		= shmem_link,
2109	.unlink		= shmem_unlink,
2110	.symlink	= shmem_symlink,
2111	.mkdir		= shmem_mkdir,
2112	.rmdir		= shmem_rmdir,
2113	.mknod		= shmem_mknod,
2114	.rename		= shmem_rename,
2115#endif
2116};
2117
2118static struct super_operations shmem_ops = {
2119	.alloc_inode	= shmem_alloc_inode,
2120	.destroy_inode	= shmem_destroy_inode,
2121#ifdef CONFIG_TMPFS
2122	.statfs		= shmem_statfs,
2123	.remount_fs	= shmem_remount_fs,
2124#endif
2125	.delete_inode	= shmem_delete_inode,
2126	.drop_inode	= generic_delete_inode,
2127	.put_super	= shmem_put_super,
2128};
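/*
 * Editor's note: pointing ->drop_inode at generic_delete_inode makes
 * the final iput() always delete a tmpfs inode instead of caching it;
 * with no backing store to re-read from, an unreferenced cached inode
 * would only pin memory.
 */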
2129
2130static struct vm_operations_struct shmem_vm_ops = {
2131	.nopage		= shmem_nopage,
2132	.populate	= shmem_populate,
2133#ifdef CONFIG_NUMA
2134	.set_policy     = shmem_set_policy,
2135	.get_policy     = shmem_get_policy,
2136#endif
2137};
2138
2139
2140static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
2141	int flags, const char *dev_name, void *data)
2142{
2143	return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
2144}
2145
2146static struct file_system_type tmpfs_fs_type = {
2147	.owner		= THIS_MODULE,
2148	.name		= "tmpfs",
2149	.get_sb		= shmem_get_sb,
2150	.kill_sb	= kill_litter_super,
2151};
2152static struct vfsmount *shm_mnt;
2153
2154static int __init init_tmpfs(void)
2155{
2156	int error;
2157
2158	error = init_inodecache();
2159	if (error)
2160		goto out3;
2161
2162	error = register_filesystem(&tmpfs_fs_type);
2163	if (error) {
2164		printk(KERN_ERR "Could not register tmpfs\n");
2165		goto out2;
2166	}
2167#ifdef CONFIG_TMPFS
2168	devfs_mk_dir("shm");
2169#endif
2170	shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
2171				tmpfs_fs_type.name, NULL);
2172	if (IS_ERR(shm_mnt)) {
2173		error = PTR_ERR(shm_mnt);
2174		printk(KERN_ERR "Could not kern_mount tmpfs\n");
2175		goto out1;
2176	}
2177	return 0;
2178
2179out1:
2180	unregister_filesystem(&tmpfs_fs_type);
2181out2:
2182	destroy_inodecache();
2183out3:
2184	shm_mnt = ERR_PTR(error);
2185	return error;
2186}
2187module_init(init_tmpfs)
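/*
 * Editor's note: on failure shm_mnt is deliberately left holding
 * ERR_PTR(error), so a later shmem_file_setup() call sees
 * IS_ERR(shm_mnt) and returns that original init-time error instead of
 * touching an unusable mount.
 */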
2188
2189/*
2190 * shmem_file_setup - get an unlinked file living in tmpfs
2191 *
2192 * @name: name for dentry (to be seen in /proc/<pid>/maps)
2193 * @size: size to be set for the file
2194 * @flags: vm_flags: VM_ACCOUNT here means charge for @size up front
2195 */
2196struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
2197{
2198	int error;
2199	struct file *file;
2200	struct inode *inode;
2201	struct dentry *dentry, *root;
2202	struct qstr this;
2203
2204	if (IS_ERR(shm_mnt))
2205		return (void *)shm_mnt;
2206
2207	if (size < 0 || size > SHMEM_MAX_BYTES)
2208		return ERR_PTR(-EINVAL);
2209
2210	if (shmem_acct_size(flags, size))
2211		return ERR_PTR(-ENOMEM);
2212
2213	error = -ENOMEM;
2214	this.name = name;
2215	this.len = strlen(name);
2216	this.hash = 0; /* will go */
2217	root = shm_mnt->mnt_root;
2218	dentry = d_alloc(root, &this);
2219	if (!dentry)
2220		goto put_memory;
2221
2222	error = -ENFILE;
2223	file = get_empty_filp();
2224	if (!file)
2225		goto put_dentry;
2226
2227	error = -ENOSPC;
2228	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
2229	if (!inode)
2230		goto close_file;
2231
2232	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
2233	d_instantiate(dentry, inode);
2234	inode->i_size = size;
2235	inode->i_nlink = 0;	/* It is unlinked */
2236	file->f_vfsmnt = mntget(shm_mnt);
2237	file->f_dentry = dentry;
2238	file->f_mapping = inode->i_mapping;
2239	file->f_op = &shmem_file_operations;
2240	file->f_mode = FMODE_WRITE | FMODE_READ;
2241	return file;
2242
2243close_file:
2244	put_filp(file);
2245put_dentry:
2246	dput(dentry);
2247put_memory:
2248	shmem_unacct_size(flags, size);
2249	return ERR_PTR(error);
2250}
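/*
 * Editor's sketch of a caller (hypothetical and compiled out; the names
 * are invented): in-tree users such as shmem_zero_setup() below follow
 * this pattern to obtain an unlinked, tmpfs-backed file.
 */
#if 0
static struct file *example_shmem_user(loff_t size)
{
	struct file *filp;

	/* VM_ACCOUNT asks shmem to charge for the full size up front */
	filp = shmem_file_setup("dev/example", size, VM_ACCOUNT);
	if (IS_ERR(filp))
		return filp;	/* -EINVAL, -ENOMEM, -ENFILE or -ENOSPC */

	/* do I/O through filp->f_mapping; fput(filp) drops it for good */
	return filp;
}
#endif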
2251
2252/*
2253 * shmem_zero_setup - setup a shared anonymous mapping
2254 *
2255 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2256 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
2257int shmem_zero_setup(struct vm_area_struct *vma)
2258{
2259	struct file *file;
2260	loff_t size = vma->vm_end - vma->vm_start;
2261
2262	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2263	if (IS_ERR(file))
2264		return PTR_ERR(file);
2265
2266	if (vma->vm_file)
2267		fput(vma->vm_file);
2268	vma->vm_file = file;
2269	vma->vm_ops = &shmem_vm_ops;
2270	return 0;
2271}
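/*
 * Editor's note (hedged): this is the kernel half of a userspace
 *
 *	mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
 *
 * do_mmap_pgoff() calls shmem_zero_setup() for shared anonymous
 * mappings.  The "dev/zero" dentry name is deliberate: d_path() from
 * the tmpfs root prepends the slash, so the region appears as
 * "/dev/zero (deleted)" in /proc/<pid>/maps.
 */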
2272