shmem.c revision 46f65ec15c6878a2b4a49f6e01b20b201b46a9e4
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		percpu_counter_add(&sbinfo->used_blocks, -pages);
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
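 *
 * A worked example, with illustrative numbers only: if info->alloced == 8,
 * info->swapped == 2 and i_mapping->nrpages == 4, then freed works out as
 * 8 - 2 - 4 = 2 pages dropped behind our back, so both the vm accounting
 * and the superblock's used_blocks are credited by 2.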
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

static void shmem_put_swap(struct shmem_inode_info *info, pgoff_t index,
			   swp_entry_t swap)
{
	if (index < SHMEM_NR_DIRECT)
		info->i_direct[index] = swap;
}

static swp_entry_t shmem_get_swap(struct shmem_inode_info *info, pgoff_t index)
{
	return (index < SHMEM_NR_DIRECT) ?
		info->i_direct[index] : (swp_entry_t){0};
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
	if (error)
		goto out;
	if (!expected)
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree,
							index, page);
		else
			error = shmem_radix_tree_replace(mapping, index,
							expected, page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		if (!expected)
			radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);
out:
	return error;
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_exceptional_entry(page))
				goto export;
			/* radix_tree_deref_retry(page) */
			goto restart;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Lockless lookup of swap entry in radix tree, avoiding refcount on pages.
 */
static pgoff_t shmem_find_swap(struct address_space *mapping, void *radswap)
{
	void **slots[PAGEVEC_SIZE];
	pgoff_t indices[PAGEVEC_SIZE];
	unsigned int nr_found;

restart:
	nr_found = 1;
	indices[0] = -1;
	while (nr_found) {
		pgoff_t index = indices[nr_found - 1] + 1;
		unsigned int i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
					slots, indices, index, PAGEVEC_SIZE);
		for (i = 0; i < nr_found; i++) {
			void *item = radix_tree_deref_slot(slots[i]);
			if (radix_tree_deref_retry(item)) {
				rcu_read_unlock();
				goto restart;
			}
			if (item == radswap) {
				rcu_read_unlock();
				return indices[i];
			}
		}
		rcu_read_unlock();
		cond_resched();
	}
	return -1;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_pagevec_release(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
	pagevec_release(pvec);
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
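 *
 * An illustrative call, assuming 4096-byte pages: truncating down to
 * i_size 3000 passes lstart == 3000, lend == -1, so start == 1 and
 * partial == 3000; whole pages from index 1 upwards are removed, and
 * the tail of page 0 beyond byte 3000 is zeroed afterwards.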
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] > end) {
			shmem_pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page *page)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	int error;

	radswap = swp_to_radix_entry(swap);
	index = shmem_find_swap(mapping, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	error = shmem_add_to_page_cache(page, mapping, index,
						GFP_NOWAIT, radswap);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(page);
		set_page_dirty(page);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 * shmem_add_to_page_cache() will be called with GFP_NOWAIT.
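	 *
	 * (Sketch of the ordering: the charge may reclaim and sleep, so it
	 * must happen before shmem_swaplist_mutex is taken; once inside the
	 * mutex only GFP_NOWAIT is safe, since shmem_writepage() may be
	 * waiting for this mutex from within reclaim.)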
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (!info->swapped) {
			spin_lock(&info->lock);
			if (!info->swapped)
				list_del_init(&info->swaplist);
			spin_unlock(&info->lock);
		}
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, page);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (!found)
		mem_cgroup_uncharge_cache_page(page);
	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t swap, oswap;
	struct address_space *mapping;
	pgoff_t index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * Disable even the toy swapping implementation, while we convert
	 * functions one by one to having swap entries in the radix tree.
	 */
	if (index < ULONG_MAX)
		goto redirty;

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now because we cannot take
	 * mutex while holding spinlock, and must do so before the page
	 * is moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've taken the spinlock, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under both locks.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	spin_lock(&info->lock);
	mutex_unlock(&shmem_swaplist_mutex);

	oswap = shmem_get_swap(info, index);
	if (oswap.val) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		free_swap_and_cache(oswap);
		shmem_put_swap(info, index, (swp_entry_t){0});
		info->swapped--;
	}
	shmem_recalc_inode(inode);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		delete_from_page_cache(page);
		shmem_put_swap(info, index, swap);
		info->swapped++;
		swap_shmem_alloc(swap);
		spin_unlock(&info->lock);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	spin_unlock(&info->lock);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *page;
	struct page *prealloc_page = NULL;
	swp_entry_t swap;
	int error;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	page = find_lock_page(mapping, index);
	if (page) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(!PageUptodate(page));
		goto done;
	}

	/*
	 * Try to preload while we can wait, to not make a habit of
	 * draining atomic reserves; but don't latch on to this cpu.
	 */
	error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (error)
		goto out;
	radix_tree_preload_end();

	if (sgp != SGP_READ && !prealloc_page) {
		prealloc_page = shmem_alloc_page(gfp, info, index);
		if (prealloc_page) {
			SetPageSwapBacked(prealloc_page);
			if (mem_cgroup_cache_charge(prealloc_page,
					current->mm, GFP_KERNEL)) {
				page_cache_release(prealloc_page);
				prealloc_page = NULL;
			}
		}
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	swap = shmem_get_swap(info, index);
	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				swp_entry_t nswap = shmem_get_swap(info, index);
				if (nswap.val == swap.val) {
					error = -ENOMEM;
					goto out;
				}
				goto repeat;
			}
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(page)) {
			spin_unlock(&info->lock);
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}
		if (PageWriteback(page)) {
			spin_unlock(&info->lock);
			wait_on_page_writeback(page);
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		if (!PageUptodate(page)) {
			spin_unlock(&info->lock);
			unlock_page(page);
			page_cache_release(page);
			error = -EIO;
			goto out;
		}

		error = add_to_page_cache_locked(page, mapping,
						 index, GFP_NOWAIT);
		if (error) {
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/*
				 * reclaim from proper memory cgroup and
				 * call memcg's OOM if needed.
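				 * (GFP_NOWAIT above meant the charge
				 * could not reclaim; the fallback
				 * retries it with a gfp that may.)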
				 */
				error = mem_cgroup_shmem_charge_fallback(
						page, current->mm, gfp);
				if (error) {
					unlock_page(page);
					page_cache_release(page);
					goto out;
				}
			}
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}

		delete_from_swap_cache(page);
		shmem_put_swap(info, index, (swp_entry_t){0});
		info->swapped--;
		spin_unlock(&info->lock);
		set_page_dirty(page);
		swap_free(swap);

	} else if (sgp == SGP_READ) {
		page = find_get_page(mapping, index);
		if (page && !trylock_page(page)) {
			spin_unlock(&info->lock);
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}
		spin_unlock(&info->lock);

	} else if (prealloc_page) {
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0 ||
			    shmem_acct_block(info->flags))
				goto nospace;
			percpu_counter_inc(&sbinfo->used_blocks);
			inode->i_blocks += BLOCKS_PER_PAGE;
		} else if (shmem_acct_block(info->flags))
			goto nospace;

		page = prealloc_page;
		prealloc_page = NULL;

		swap = shmem_get_swap(info, index);
		if (swap.val)
			mem_cgroup_uncharge_cache_page(page);
		else
			error = add_to_page_cache_lru(page, mapping,
						index, GFP_NOWAIT);
		/*
		 * At add_to_page_cache_lru() failure,
		 * uncharge will be done automatically.
		 */
		if (swap.val || error) {
			shmem_unacct_blocks(info->flags, 1);
			shmem_free_blocks(inode, 1);
			spin_unlock(&info->lock);
			page_cache_release(page);
			goto repeat;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);

	} else {
		spin_unlock(&info->lock);
		error = -ENOMEM;
		goto out;
	}
done:
	*pagep = page;
	error = 0;
out:
	if (prealloc_page) {
		mem_cgroup_uncharge_cache_page(prealloc_page);
		page_cache_release(prealloc_page);
	}
	return error;

nospace:
	/*
	 * Perhaps the page was brought in from swap between find_lock_page
	 * and taking info->lock?  We allow for that at add_to_page_cache_lru,
	 * but must also avoid reporting a spurious ENOSPC while working on a
	 * full tmpfs.
	 */
	page = find_get_page(mapping, index);
	spin_unlock(&info->lock);
	if (page) {
		page_cache_release(page);
		goto repeat;
	}
	error = -ENOSPC;
	goto out;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     int mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
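			 * (Inode destruction frees the shared policy
			 * only for S_IFREG inodes, so a symlink's
			 * rbtree must stay empty or its nodes would
			 * simply leak.)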
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(pipe, &spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name, NULL,
						     NULL, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
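	 * Each link is therefore charged against the inode limit just as
	 * a new inode would be: on a full filesystem, link() fails with
	 * -ENOSPC from shmem_reserve_inode() below instead of pinning
	 * unbounded lowmem.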
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
					     NULL, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHMEM_SYMLINK_INLINE_LEN) {
		/* do it inline */
		memcpy(info->inline_symlink, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static int shmem_xattr_get(struct dentry *dentry, const char *name,
			   void *buffer, size_t size)
{
	struct shmem_inode_info *info;
	struct shmem_xattr *xattr;
	int ret = -ENODATA;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (strcmp(name, xattr->name))
			continue;

		ret = xattr->size;
		if (buffer) {
			if (size < xattr->size)
				ret = -ERANGE;
			else
				memcpy(buffer, xattr->value, xattr->size);
		}
		break;
	}
	spin_unlock(&info->lock);
	return ret;
}

static int shmem_xattr_set(struct dentry *dentry, const char *name,
			   const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr;
	struct shmem_xattr *new_xattr = NULL;
	size_t len;
	int err = 0;

	/* value == NULL means remove */
	if (value) {
		/* wrap around? */
		len = sizeof(*new_xattr) + size;
		if (len <= sizeof(*new_xattr))
			return -ENOMEM;

		new_xattr = kmalloc(len, GFP_KERNEL);
		if (!new_xattr)
			return -ENOMEM;

		new_xattr->name = kstrdup(name, GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		new_xattr->size = size;
		memcpy(new_xattr->value, value, size);
	}

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (!strcmp(name, xattr->name)) {
			if (flags & XATTR_CREATE) {
				xattr = new_xattr;
				err = -EEXIST;
			} else if (new_xattr) {
				list_replace(&xattr->list, &new_xattr->list);
			} else {
				list_del(&xattr->list);
			}
			goto out;
		}
	}
	if (flags & XATTR_REPLACE) {
		xattr = new_xattr;
		err = -ENODATA;
	} else {
		list_add(&new_xattr->list, &info->xattr_list);
		xattr = NULL;
	}
out:
	spin_unlock(&info->lock);
	if (xattr)
		kfree(xattr->name);
	kfree(xattr);
	return err;
}

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};

static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}

static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_get(dentry, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	if (size == 0)
		value = "";  /* empty EA, do not remove */

	return shmem_xattr_set(dentry, name, value, size, flags);

}

static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
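	 *
	 * (Removal itself is just shmem_xattr_set() with a NULL value:
	 * XATTR_REPLACE makes that return -ENODATA when no attribute of
	 * this name exists.)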
1915 */ 1916 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 1917 return generic_removexattr(dentry, name); 1918 1919 err = shmem_xattr_validate(name); 1920 if (err) 1921 return err; 1922 1923 return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE); 1924} 1925 1926static bool xattr_is_trusted(const char *name) 1927{ 1928 return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN); 1929} 1930 1931static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 1932{ 1933 bool trusted = capable(CAP_SYS_ADMIN); 1934 struct shmem_xattr *xattr; 1935 struct shmem_inode_info *info; 1936 size_t used = 0; 1937 1938 info = SHMEM_I(dentry->d_inode); 1939 1940 spin_lock(&info->lock); 1941 list_for_each_entry(xattr, &info->xattr_list, list) { 1942 size_t len; 1943 1944 /* skip "trusted." attributes for unprivileged callers */ 1945 if (!trusted && xattr_is_trusted(xattr->name)) 1946 continue; 1947 1948 len = strlen(xattr->name) + 1; 1949 used += len; 1950 if (buffer) { 1951 if (size < used) { 1952 used = -ERANGE; 1953 break; 1954 } 1955 memcpy(buffer, xattr->name, len); 1956 buffer += len; 1957 } 1958 } 1959 spin_unlock(&info->lock); 1960 1961 return used; 1962} 1963#endif /* CONFIG_TMPFS_XATTR */ 1964 1965static const struct inode_operations shmem_symlink_inline_operations = { 1966 .readlink = generic_readlink, 1967 .follow_link = shmem_follow_link_inline, 1968#ifdef CONFIG_TMPFS_XATTR 1969 .setxattr = shmem_setxattr, 1970 .getxattr = shmem_getxattr, 1971 .listxattr = shmem_listxattr, 1972 .removexattr = shmem_removexattr, 1973#endif 1974}; 1975 1976static const struct inode_operations shmem_symlink_inode_operations = { 1977 .readlink = generic_readlink, 1978 .follow_link = shmem_follow_link, 1979 .put_link = shmem_put_link, 1980#ifdef CONFIG_TMPFS_XATTR 1981 .setxattr = shmem_setxattr, 1982 .getxattr = shmem_getxattr, 1983 .listxattr = shmem_listxattr, 1984 .removexattr = shmem_removexattr, 1985#endif 1986}; 1987 1988static struct dentry *shmem_get_parent(struct dentry *child) 1989{ 1990 return ERR_PTR(-ESTALE); 1991} 1992 1993static int shmem_match(struct inode *ino, void *vfh) 1994{ 1995 __u32 *fh = vfh; 1996 __u64 inum = fh[2]; 1997 inum = (inum << 32) | fh[1]; 1998 return ino->i_ino == inum && fh[0] == ino->i_generation; 1999} 2000 2001static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 2002 struct fid *fid, int fh_len, int fh_type) 2003{ 2004 struct inode *inode; 2005 struct dentry *dentry = NULL; 2006 u64 inum = fid->raw[2]; 2007 inum = (inum << 32) | fid->raw[1]; 2008 2009 if (fh_len < 3) 2010 return NULL; 2011 2012 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 2013 shmem_match, fid->raw); 2014 if (inode) { 2015 dentry = d_find_alias(inode); 2016 iput(inode); 2017 } 2018 2019 return dentry; 2020} 2021 2022static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, 2023 int connectable) 2024{ 2025 struct inode *inode = dentry->d_inode; 2026 2027 if (*len < 3) { 2028 *len = 3; 2029 return 255; 2030 } 2031 2032 if (inode_unhashed(inode)) { 2033 /* Unfortunately insert_inode_hash is not idempotent, 2034 * so as we hash inodes here rather than at creation 2035 * time, we need a lock to ensure we only try 2036 * to do it once 2037 */ 2038 static DEFINE_SPINLOCK(lock); 2039 spin_lock(&lock); 2040 if (inode_unhashed(inode)) 2041 __insert_inode_hash(inode, 2042 inode->i_ino + inode->i_generation); 2043 spin_unlock(&lock); 2044 } 2045 2046 fh[0] = inode->i_generation; 2047 fh[1] = inode->i_ino; 2048 fh[2] = 

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3) {
		*len = 3;
		return 255;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char, "size")) {
			unsigned long long size;
			size = memparse(value, &rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char, "nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "uid")) {
			if (remount)
				continue;
			sbinfo->uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "gid")) {
			if (remount)
				continue;
			sbinfo->gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
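
/*
 * Illustrative sketch, not part of the original file: the option strings
 * shmem_parse_options() accepts, and why its comma scan checks for a
 * leading digit -- in "mpol=interleave:0,2" the character after the comma
 * is a digit, so the comma is kept as part of the mpol nodelist instead
 * of splitting the option. The mount point is hypothetical; guarded by
 * #if 0 so it is never built.
 */
#if 0
#include <sys/mount.h>

static int tmpfs_mount_demo(void)
{
	/* size may be bytes, k/m/g-suffixed, or a percentage of RAM */
	return mount("tmpfs", "/mnt/demo", "tmpfs", 0,
		     "size=50%,nr_inodes=10k,mode=1777,mpol=interleave:0,2");
}
#endif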

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks = config.max_blocks;
	sbinfo->max_inodes = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	mpol_put(sbinfo->mpol);
	sbinfo->mpol = config.mpol;	/* transfers initial ref */
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
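
/*
 * Illustrative sketch, not part of the original file: the remount rules
 * enforced above, seen from userspace. Shrinking a limit below current
 * usage fails, and so does switching between unlimited (0) and limited
 * while anything is in use. The mount point is hypothetical; guarded by
 * #if 0 so it is never built.
 */
#if 0
#include <sys/mount.h>

static void tmpfs_remount_demo(void)
{
	/* ok: raise the block limit on a live mount */
	mount(NULL, "/mnt/demo", NULL, MS_REMOUNT, "size=2g");

	/* fails with EINVAL while blocks are in use: 0 means unlimited,
	 * and limited->unlimited is disallowed once pages are charged */
	mount(NULL, "/mnt/demo", NULL, MS_REMOUNT, "nr_blocks=0");
}
#endif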

static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03o", sbinfo->mode);
	if (sbinfo->uid != 0)
		seq_printf(seq, ",uid=%u", sbinfo->uid);
	if (sbinfo->gid != 0)
		seq_printf(seq, ",gid=%u", sbinfo->gid);
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * By default we only allow half of the physical RAM per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, shmem_init_inode);
	return 0;
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
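
/*
 * Illustrative sketch, not part of the original file: the constructor
 * passed to kmem_cache_create() above runs once when a slab object is
 * first set up, not on every allocation, so only state that survives
 * free/realloc cycles (here the embedded inode) belongs in it. A minimal
 * hypothetical cache using the same idiom; guarded by #if 0 so it is
 * never built.
 */
#if 0
struct demo_obj {
	spinlock_t lock;	/* initialized once per slab object */
	int state;		/* must be reset by the allocator instead */
};

static void demo_init_once(void *foo)
{
	struct demo_obj *obj = foo;
	spin_lock_init(&obj->lock);
}

static struct kmem_cache *demo_cachep;

static int __init demo_cache_init(void)
{
	demo_cachep = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
					0, SLAB_PANIC, demo_init_once);
	return 0;
}
#endif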

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.setattr	= shmem_setattr,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
};

int __init shmem_init(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
				 shmem_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
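
/*
 * Illustrative sketch, not part of the original file: the error-unwind
 * idiom shmem_init() uses above -- each failure jumps to the label that
 * undoes everything set up so far, in reverse order, keeping a single
 * teardown path. Setup/teardown helpers are hypothetical; guarded by
 * #if 0 so it is never built.
 */
#if 0
static int demo_init(void)
{
	int error;

	error = demo_setup_a();
	if (error)
		goto out2;
	error = demo_setup_b();		/* depends on a */
	if (error)
		goto out1;
	return 0;

out1:
	demo_teardown_a();		/* undo in reverse order */
out2:
	return error;
}
#endif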

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/**
 * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
 * @inode: the inode to be searched
 * @index: the page offset to be searched
 * @pagep: the pointer for the found page to be stored
 * @swapp: the pointer for the found swap entry to be stored
 *
 * If a page is found, its refcount is incremented; the caller must drop
 * that reference when done with the page.
 */
void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
					struct page **pagep, swp_entry_t *swapp)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct page *page = NULL;
	swp_entry_t swap = {0};

	if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		goto out;

	spin_lock(&info->lock);
#ifdef CONFIG_SWAP
	swap = shmem_get_swap(info, index);
	if (swap.val)
		page = find_get_page(&swapper_space, swap.val);
	else
#endif
		page = find_get_page(inode->i_mapping, index);
	spin_unlock(&info->lock);
out:
	*pagep = page;
	*swapp = swap;
}
#endif
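
/*
 * Illustrative sketch, not part of the original file: the caller contract
 * of mem_cgroup_get_shmem_target(). A returned page carries a reference
 * from find_get_page() which the caller must drop; *swapp reports the
 * swap entry backing the offset, if any. Hypothetical caller; guarded by
 * #if 0 so it is never built.
 */
#if 0
static void demo_probe_shmem(struct inode *inode, pgoff_t index)
{
	struct page *page;
	swp_entry_t swap;

	mem_cgroup_get_shmem_target(inode, index, &page, &swap);
	if (page) {
		/* ... inspect the page (it may be a swapcache page) ... */
		put_page(page);		/* drop the reference taken above */
	} else if (swap.val) {
		/* ... offset is swapped out, no page in swapcache ... */
	}
}
#endif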

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * its complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/**
 * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
 * @inode: the inode to be searched
 * @index: the page offset to be searched
 * @pagep: the pointer for the found page to be stored
 * @swapp: the pointer for the found swap entry to be stored
 *
 * If a page is found, its refcount is incremented; the caller must drop
 * that reference when done with the page.
 */
void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
					struct page **pagep, swp_entry_t *swapp)
{
	struct page *page = NULL;

	if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		goto out;
	page = find_get_page(inode->i_mapping, index);
out:
	*pagep = page;
	*swapp = (swp_entry_t){0};
}
#endif

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct path path;
	struct dentry *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0;	/* will go */
	root = shm_mnt->mnt_root;
	path.dentry = d_alloc(root, &this);
	if (!path.dentry)
		goto put_memory;
	path.mnt = mntget(shm_mnt);

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto put_dentry;
#endif

	error = -ENFILE;
	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			  &shmem_file_operations);
	if (!file)
		goto put_dentry;

	return file;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
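
/*
 * Illustrative sketch, not part of the original file: a typical in-kernel
 * consumer of shmem_file_setup(). The returned file is already unlinked
 * (i_nlink == 0), so dropping the final reference with fput() releases
 * it. The helper name is hypothetical; guarded by #if 0 so it is never
 * built.
 */
#if 0
static struct file *demo_shmem_buffer(loff_t size)
{
	struct file *file;

	/* "demo-buf" only names the dentry seen in /proc/<pid>/maps */
	file = shmem_file_setup("demo-buf", size, VM_NORESERVE);
	if (IS_ERR(file))
		return file;		/* check with IS_ERR()/PTR_ERR() */

	/* ... mmap or read/write through the file, then fput(file) ... */
	return file;
}
#endif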

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping: the page's address_space
 * @index: the page index
 * @gfp: the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method, which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
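
/*
 * Illustrative sketch, not part of the original file: calling the helper
 * above the way its comment describes i915 doing it -- relaxing the
 * mapping's gfp mask so a failed allocation returns ERR_PTR(-ENOMEM)
 * instead of pushing the machine toward OOM. Hypothetical caller; guarded
 * by #if 0 so it is never built.
 */
#if 0
static int demo_read_shmem_page(struct address_space *mapping, pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* page comes back uptodate, unlocked, with a reference held */
	page_cache_release(page);
	return 0;
}
#endif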