shmem.c revision 8079b1c859c44f27d63da4951f5038a16589a563
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}
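
/*
 * Worked example (illustrative, not from the original source): on a
 * build with 4K pages, PAGE_CACHE_SIZE == 4096 and PAGE_SHIFT == 12, so
 * BLOCKS_PER_PAGE == 8 (i_blocks counts 512-byte sectors) and
 * VM_ACCT(10000) == PAGE_CACHE_ALIGN(10000) >> PAGE_SHIFT == 12288 >> 12
 * == 3 pages charged against the overcommit limit.
 */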

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
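
/*
 * Bookkeeping sketch (illustrative): with info->alloced == 10,
 * info->swapped == 2 and i_mapping->nrpages == 6, two clean hole pages
 * must have been reclaimed behind our back, so shmem_recalc_inode()
 * below computes freed == 10 - 2 - 6 == 2 and returns those two pages
 * to the used_blocks counter and the overcommit accounting.
 */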

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error = 0;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapBacked(page));

	if (!expected)
		error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (!error) {
		page_cache_get(page);
		page->mapping = mapping;
		page->index = index;

		spin_lock_irq(&mapping->tree_lock);
		if (!expected)
			error = radix_tree_insert(&mapping->page_tree,
							index, page);
		else
			error = shmem_radix_tree_replace(mapping, index,
							expected, page);
		if (!error) {
			mapping->nrpages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			__inc_zone_page_state(page, NR_SHMEM);
			spin_unlock_irq(&mapping->tree_lock);
		} else {
			page->mapping = NULL;
			spin_unlock_irq(&mapping->tree_lock);
			page_cache_release(page);
		}
		if (!expected)
			radix_tree_preload_end();
	}
	if (error)
		mem_cgroup_uncharge_cache_page(page);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}
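
/*
 * Encoding sketch (illustrative): a swapped-out page leaves behind not a
 * struct page pointer but swp_to_radix_entry(swap), a pointer-sized
 * value tagged as a radix-tree exceptional entry, so lookups can tell
 * the two apart with radix_tree_exceptional_entry() and decode the swap
 * slot again via radix_to_swp_entry() -- see shmem_free_swap() and
 * shmem_getpage_gfp() below.
 */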

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_pagevec_release(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
	pagevec_release(pvec);
}
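
/*
 * Output sketch (illustrative): after a gang lookup over a file whose
 * pages 0 and 2 are resident but whose page 1 is swapped out, pages[]
 * holds { page0, swp-entry, page2 } with indices[] { 0, 1, 2 }; only
 * the two real pages carry an elevated refcount, which is why
 * shmem_pagevec_release() filters exceptional entries before release.
 */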

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, partial, PAGE_CACHE_SIZE);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] > end) {
			shmem_pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (page->mapping == mapping) {
				VM_BUG_ON(PageWriteback(page));
				truncate_inode_page(mapping, page);
			}
			unlock_page(page);
		}
		shmem_pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
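
/*
 * Index arithmetic sketch (illustrative, 4K pages): truncating a file
 * down to size 5000 calls this with lstart == 5000, lend == -1, giving
 * start == 2 (first whole page to drop), partial == 904 and end == the
 * maximal index; pages 2 and up are removed, while page 1 is kept but
 * has its bytes 904..4095 (file offsets 5000..8191) zeroed by the
 * "if (partial)" block above.
 */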

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page *page)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	int error;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	error = shmem_add_to_page_cache(page, mapping, index,
						GFP_NOWAIT, radswap);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(page);
		set_page_dirty(page);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}
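
/*
 * Return convention sketch (illustrative): shmem_unuse_inode() returns
 * 0 when this inode does not hold the swap entry (keep searching), 1
 * once the entry was found and dealt with (stop searching, even if the
 * cache insertion raced), and -ENOMEM only when the GFP_NOWAIT
 * insertion could not get memory -- shmem_unuse() below relies on
 * exactly this to decide when to stop walking the swaplist.
 */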

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (!found)
		mem_cgroup_uncharge_cache_page(page);
	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
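
/*
 * Design note (illustrative): the page and swap-in allocators take
 * their NUMA placement from a vma, but a tmpfs page may be faulted
 * through many different vmas, so the helpers above synthesize a
 * throwaway on-stack pseudo vma whose only meaningful fields are
 * vm_pgoff and vm_policy, looked up from the inode's shared policy
 * tree for that file index.
 */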

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	if (page || (sgp == SGP_READ && !swap.val)) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(page && !PageUptodate(page));
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		/* Someone may have already done it for us */
		if (page->mapping) {
			if (page->mapping == mapping &&
			    page->index == index)
				goto done;
			error = -EEXIST;
			goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error)
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, NULL);
		if (error)
			goto decused;
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}
done:
	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto trunc;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL) {
		struct page *test = find_get_page(mapping, index);
		if (test && !radix_tree_exceptional_entry(test))
			page_cache_release(test);
		/* Have another try if the entry has changed */
		if (test != swp_to_radix_entry(swap))
			error = -EEXIST;
	}
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)
		goto repeat;
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
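
/*
 * Caller sketch (illustrative, external to this file): shmem_lock() is
 * how SysV shmctl(id, SHM_LOCK, ...) pins a segment -- lock != 0
 * charges the segment to the user's RLIMIT_MEMLOCK via user_shm_lock()
 * and marks the mapping unevictable, while SHM_UNLOCK undoes the charge
 * and lets reclaim scan the pages again; shmem_writepage() respects
 * this through its VM_LOCKED check.
 */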

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     int mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
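
/*
 * Contract sketch (illustrative): for a write of len bytes at pos, the
 * generic write path calls ->write_begin to get the locked page
 * covering pos (SGP_WRITE allows allocating past i_size), copies the
 * user data in, then calls ->write_end, which grows i_size to
 * pos + copied if needed, dirties the page and drops lock + reference.
 */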

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}
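
/*
 * Hole handling sketch (illustrative): for an ordinary userspace read,
 * SGP_READ makes shmem_getpage() return no page over a hole, and the
 * loop above substitutes the shared ZERO_PAGE(0) instead of allocating,
 * so reading a large sparse tmpfs file consumes no tmpfs memory --
 * unlike the SGP_DIRTY path taken for stacking (KERNEL_DS) reads.
 */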

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(pipe, &spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name, NULL,
						     NULL, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
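
/*
 * Link-count sketch (illustrative): a fresh directory starts at
 * i_nlink == 2 ("." plus the entry in its parent, via the inc_nlink()
 * calls in shmem_get_inode() and shmem_mkdir()); renaming a directory
 * into another parent moves one ".." link, which is exactly the
 * drop_nlink(old_dir)/inc_nlink(new_dir) pair in shmem_rename() above.
 */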

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
					     NULL, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static int shmem_xattr_get(struct dentry *dentry, const char *name,
			   void *buffer, size_t size)
{
	struct shmem_inode_info *info;
	struct shmem_xattr *xattr;
	int ret = -ENODATA;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (strcmp(name, xattr->name))
			continue;

		ret = xattr->size;
		if (buffer) {
			if (size < xattr->size)
				ret = -ERANGE;
			else
				memcpy(buffer, xattr->value, xattr->size);
		}
		break;
	}
	spin_unlock(&info->lock);
	return ret;
}
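
/*
 * Allocation sketch (illustrative): struct shmem_xattr ends in a
 * zero-length value[] array, so an attribute with a 16-byte value is a
 * single kmalloc of sizeof(struct shmem_xattr) + 16 bytes; the
 * "len <= sizeof(*new_xattr)" test in shmem_xattr_set() below is the
 * overflow guard for absurd sizes that would wrap that addition.
 */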

static int shmem_xattr_set(struct dentry *dentry, const char *name,
			   const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr;
	struct shmem_xattr *new_xattr = NULL;
	size_t len;
	int err = 0;

	/* value == NULL means remove */
	if (value) {
		/* wrap around? */
		len = sizeof(*new_xattr) + size;
		if (len <= sizeof(*new_xattr))
			return -ENOMEM;

		new_xattr = kmalloc(len, GFP_KERNEL);
		if (!new_xattr)
			return -ENOMEM;

		new_xattr->name = kstrdup(name, GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		new_xattr->size = size;
		memcpy(new_xattr->value, value, size);
	}

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (!strcmp(name, xattr->name)) {
			if (flags & XATTR_CREATE) {
				xattr = new_xattr;
				err = -EEXIST;
			} else if (new_xattr) {
				list_replace(&xattr->list, &new_xattr->list);
			} else {
				list_del(&xattr->list);
			}
			goto out;
		}
	}
	if (flags & XATTR_REPLACE) {
		xattr = new_xattr;
		err = -ENODATA;
	} else {
		list_add(&new_xattr->list, &info->xattr_list);
		xattr = NULL;
	}
out:
	spin_unlock(&info->lock);
	if (xattr)
		kfree(xattr->name);
	kfree(xattr);
	return err;
}

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&generic_acl_access_handler,
	&generic_acl_default_handler,
#endif
	NULL
};

static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}

static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_get(dentry, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	if (size == 0)
		value = "";  /* empty EA, do not remove */

	return shmem_xattr_set(dentry, name, value, size, flags);

}

static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
}

static bool xattr_is_trusted(const char *name)
{
	return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	bool trusted = capable(CAP_SYS_ADMIN);
	struct shmem_xattr *xattr;
	struct shmem_inode_info *info;
	size_t used = 0;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		size_t len;

		/* skip "trusted." attributes for unprivileged callers */
		if (!trusted && xattr_is_trusted(xattr->name))
			continue;

		len = strlen(xattr->name) + 1;
		used += len;
		if (buffer) {
			if (size < used) {
				used = -ERANGE;
				break;
			}
			memcpy(buffer, xattr->name, len);
			buffer += len;
		}
	}
	spin_unlock(&info->lock);

	return used;
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	if (fh_len < 3)
		return NULL;

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3) {
		*len = 3;
		return 255;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}
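
/*
 * Handle layout sketch (illustrative): the exported file handle is
 * three 32-bit words -- fh[0] the inode's generation, fh[1] the low and
 * fh[2] the high half of i_ino -- and shmem_match() and
 * shmem_fh_to_dentry() above reassemble and verify exactly those
 * fields when decoding a handle back to an inode.
 */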

static const struct export_operations shmem_export_ops = {
	.get_parent     = shmem_get_parent,
	.encode_fh      = shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			sbinfo->uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			sbinfo->gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;

}
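
/*
 * Usage sketch (illustrative): "mount -t tmpfs -o size=50%,nr_inodes=1m
 * tmpfs /mnt" arrives here with options "size=50%,nr_inodes=1m"; the
 * '%' branch above scales size by totalram_pages, and memparse()
 * accepts suffixes such as k/m/g, so both limits land in
 * sbinfo->max_blocks and sbinfo->max_inodes before (for mount) or
 * during (for remount) superblock setup.
 */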
2076
2077static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2078{
2079	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2080	struct shmem_sb_info config = *sbinfo;
2081	unsigned long inodes;
2082	int error = -EINVAL;
2083
2084	if (shmem_parse_options(data, &config, true))
2085		return error;
2086
2087	spin_lock(&sbinfo->stat_lock);
2088	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
2089	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
2090		goto out;
2091	if (config.max_inodes < inodes)
2092		goto out;
2093	/*
2094	 * Those tests disallow limited->unlimited while any are in use;
2095	 * but we must separately disallow unlimited->limited, because
2096	 * in that case we have no record of how much is already in use.
2097	 */
2098	if (config.max_blocks && !sbinfo->max_blocks)
2099		goto out;
2100	if (config.max_inodes && !sbinfo->max_inodes)
2101		goto out;
2102
2103	error = 0;
2104	sbinfo->max_blocks  = config.max_blocks;
2105	sbinfo->max_inodes  = config.max_inodes;
2106	sbinfo->free_inodes = config.max_inodes - inodes;
2107
2108	mpol_put(sbinfo->mpol);
2109	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
2110out:
2111	spin_unlock(&sbinfo->stat_lock);
2112	return error;
2113}
2114
2115static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2116{
2117	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2118
2119	if (sbinfo->max_blocks != shmem_default_max_blocks())
2120		seq_printf(seq, ",size=%luk",
2121			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2122	if (sbinfo->max_inodes != shmem_default_max_inodes())
2123		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2124	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2125		seq_printf(seq, ",mode=%03o", sbinfo->mode);
2126	if (sbinfo->uid != 0)
2127		seq_printf(seq, ",uid=%u", sbinfo->uid);
2128	if (sbinfo->gid != 0)
2129		seq_printf(seq, ",gid=%u", sbinfo->gid);
2130	shmem_show_mpol(seq, sbinfo->mpol);
2131	return 0;
2132}
2133#endif /* CONFIG_TMPFS */
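
/*
 * A sketch of the remount rules enforced above, assuming a tmpfs
 * mounted at the illustrative path /mnt/tmp. Raising a limit is always
 * permitted; shrinking below current usage fails; and because usage
 * stops being tracked once a limit is 0 ("unlimited"),
 * unlimited->limited is refused outright, while limited->unlimited is
 * only allowed when nothing is in use.
 */
#include <sys/mount.h>

static void remount_tmpfs_example(void)
{
	/* raise the block limit: always permitted */
	mount("tmpfs", "/mnt/tmp", "tmpfs", MS_REMOUNT, "size=2g");

	/* size=0 selects "unlimited": only succeeds while nothing is in use */
	mount("tmpfs", "/mnt/tmp", "tmpfs", MS_REMOUNT, "size=0");

	/* unlimited->limited always fails with -EINVAL: no usage record */
	mount("tmpfs", "/mnt/tmp", "tmpfs", MS_REMOUNT, "size=1g");
}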
2134
2135static void shmem_put_super(struct super_block *sb)
2136{
2137	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2138
2139	percpu_counter_destroy(&sbinfo->used_blocks);
2140	kfree(sbinfo);
2141	sb->s_fs_info = NULL;
2142}
2143
2144int shmem_fill_super(struct super_block *sb, void *data, int silent)
2145{
2146	struct inode *inode;
2147	struct dentry *root;
2148	struct shmem_sb_info *sbinfo;
2149	int err = -ENOMEM;
2150
2151	/* Round up to L1_CACHE_BYTES to resist false sharing */
2152	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
2153				L1_CACHE_BYTES), GFP_KERNEL);
2154	if (!sbinfo)
2155		return -ENOMEM;
2156
2157	sbinfo->mode = S_IRWXUGO | S_ISVTX;
2158	sbinfo->uid = current_fsuid();
2159	sbinfo->gid = current_fsgid();
2160	sb->s_fs_info = sbinfo;
2161
2162#ifdef CONFIG_TMPFS
2163	/*
2164	 * Per default we only allow half of the physical ram per
2165	 * tmpfs instance, limiting inodes to one per page of lowmem;
2166	 * but the internal instance is left unlimited.
2167	 */
2168	if (!(sb->s_flags & MS_NOUSER)) {
2169		sbinfo->max_blocks = shmem_default_max_blocks();
2170		sbinfo->max_inodes = shmem_default_max_inodes();
2171		if (shmem_parse_options(data, sbinfo, false)) {
2172			err = -EINVAL;
2173			goto failed;
2174		}
2175	}
2176	sb->s_export_op = &shmem_export_ops;
2177#else
2178	sb->s_flags |= MS_NOUSER;
2179#endif
2180
2181	spin_lock_init(&sbinfo->stat_lock);
2182	if (percpu_counter_init(&sbinfo->used_blocks, 0))
2183		goto failed;
2184	sbinfo->free_inodes = sbinfo->max_inodes;
2185
2186	sb->s_maxbytes = MAX_LFS_FILESIZE;
2187	sb->s_blocksize = PAGE_CACHE_SIZE;
2188	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2189	sb->s_magic = TMPFS_MAGIC;
2190	sb->s_op = &shmem_ops;
2191	sb->s_time_gran = 1;
2192#ifdef CONFIG_TMPFS_XATTR
2193	sb->s_xattr = shmem_xattr_handlers;
2194#endif
2195#ifdef CONFIG_TMPFS_POSIX_ACL
2196	sb->s_flags |= MS_POSIXACL;
2197#endif
2198
2199	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
2200	if (!inode)
2201		goto failed;
2202	inode->i_uid = sbinfo->uid;
2203	inode->i_gid = sbinfo->gid;
2204	root = d_alloc_root(inode);
2205	if (!root)
2206		goto failed_iput;
2207	sb->s_root = root;
2208	return 0;
2209
2210failed_iput:
2211	iput(inode);
2212failed:
2213	shmem_put_super(sb);
2214	return err;
2215}
2216
2217static struct kmem_cache *shmem_inode_cachep;
2218
2219static struct inode *shmem_alloc_inode(struct super_block *sb)
2220{
2221	struct shmem_inode_info *info;
2222	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2223	if (!info)
2224		return NULL;
2225	return &info->vfs_inode;
2226}
2227
2228static void shmem_destroy_callback(struct rcu_head *head)
2229{
2230	struct inode *inode = container_of(head, struct inode, i_rcu);
2231	INIT_LIST_HEAD(&inode->i_dentry);
2232	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2233}
2234
2235static void shmem_destroy_inode(struct inode *inode)
2236{
2237	if ((inode->i_mode & S_IFMT) == S_IFREG)
2238		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2239	call_rcu(&inode->i_rcu, shmem_destroy_callback);
2240}
2241
2242static void shmem_init_inode(void *foo)
2243{
2244	struct shmem_inode_info *info = foo;
2245	inode_init_once(&info->vfs_inode);
2246}
2247
2248static int shmem_init_inodecache(void)
2249{
2250	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2251				sizeof(struct shmem_inode_info),
2252				0, SLAB_PANIC, shmem_init_inode);
2253	return 0;
2254}
2255
2256static void shmem_destroy_inodecache(void)
2257{
2258	kmem_cache_destroy(shmem_inode_cachep);
2259}
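
/*
 * shmem_init_inode() above is a slab constructor: the allocator runs it
 * once when an object is first created, not on every
 * shmem_alloc_inode(), so objects returned to the cache must still look
 * "as constructed". A minimal userspace analogue of that object-pool
 * contract, with illustrative names:
 */
#include <stdlib.h>

struct obj {
	int constructed;	/* stands in for inode_init_once() state */
};

static struct obj *pool[16];
static int npool;

static struct obj *obj_cache_get(void)
{
	struct obj *o;

	if (npool)
		return pool[--npool];	/* constructor already ran */
	o = malloc(sizeof(*o));
	if (o)
		o->constructed = 1;	/* "constructor": once per object */
	return o;
}

static void obj_cache_put(struct obj *o)
{
	/* the caller must leave the constructed state intact */
	if (npool < 16)
		pool[npool++] = o;
	else
		free(o);
}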
2260
2261static const struct address_space_operations shmem_aops = {
2262	.writepage	= shmem_writepage,
2263	.set_page_dirty	= __set_page_dirty_no_writeback,
2264#ifdef CONFIG_TMPFS
2265	.write_begin	= shmem_write_begin,
2266	.write_end	= shmem_write_end,
2267#endif
2268	.migratepage	= migrate_page,
2269	.error_remove_page = generic_error_remove_page,
2270};
2271
2272static const struct file_operations shmem_file_operations = {
2273	.mmap		= shmem_mmap,
2274#ifdef CONFIG_TMPFS
2275	.llseek		= generic_file_llseek,
2276	.read		= do_sync_read,
2277	.write		= do_sync_write,
2278	.aio_read	= shmem_file_aio_read,
2279	.aio_write	= generic_file_aio_write,
2280	.fsync		= noop_fsync,
2281	.splice_read	= shmem_file_splice_read,
2282	.splice_write	= generic_file_splice_write,
2283#endif
2284};
2285
2286static const struct inode_operations shmem_inode_operations = {
2287	.setattr	= shmem_setattr,
2288	.truncate_range	= shmem_truncate_range,
2289#ifdef CONFIG_TMPFS_XATTR
2290	.setxattr	= shmem_setxattr,
2291	.getxattr	= shmem_getxattr,
2292	.listxattr	= shmem_listxattr,
2293	.removexattr	= shmem_removexattr,
2294#endif
2295};
2296
2297static const struct inode_operations shmem_dir_inode_operations = {
2298#ifdef CONFIG_TMPFS
2299	.create		= shmem_create,
2300	.lookup		= simple_lookup,
2301	.link		= shmem_link,
2302	.unlink		= shmem_unlink,
2303	.symlink	= shmem_symlink,
2304	.mkdir		= shmem_mkdir,
2305	.rmdir		= shmem_rmdir,
2306	.mknod		= shmem_mknod,
2307	.rename		= shmem_rename,
2308#endif
2309#ifdef CONFIG_TMPFS_XATTR
2310	.setxattr	= shmem_setxattr,
2311	.getxattr	= shmem_getxattr,
2312	.listxattr	= shmem_listxattr,
2313	.removexattr	= shmem_removexattr,
2314#endif
2315#ifdef CONFIG_TMPFS_POSIX_ACL
2316	.setattr	= shmem_setattr,
2317#endif
2318};
2319
2320static const struct inode_operations shmem_special_inode_operations = {
2321#ifdef CONFIG_TMPFS_XATTR
2322	.setxattr	= shmem_setxattr,
2323	.getxattr	= shmem_getxattr,
2324	.listxattr	= shmem_listxattr,
2325	.removexattr	= shmem_removexattr,
2326#endif
2327#ifdef CONFIG_TMPFS_POSIX_ACL
2328	.setattr	= shmem_setattr,
2329#endif
2330};
2331
2332static const struct super_operations shmem_ops = {
2333	.alloc_inode	= shmem_alloc_inode,
2334	.destroy_inode	= shmem_destroy_inode,
2335#ifdef CONFIG_TMPFS
2336	.statfs		= shmem_statfs,
2337	.remount_fs	= shmem_remount_fs,
2338	.show_options	= shmem_show_options,
2339#endif
2340	.evict_inode	= shmem_evict_inode,
2341	.drop_inode	= generic_delete_inode,
2342	.put_super	= shmem_put_super,
2343};
2344
2345static const struct vm_operations_struct shmem_vm_ops = {
2346	.fault		= shmem_fault,
2347#ifdef CONFIG_NUMA
2348	.set_policy	= shmem_set_policy,
2349	.get_policy	= shmem_get_policy,
2350#endif
2351};
2352
2353static struct dentry *shmem_mount(struct file_system_type *fs_type,
2354	int flags, const char *dev_name, void *data)
2355{
2356	return mount_nodev(fs_type, flags, data, shmem_fill_super);
2357}
2358
2359static struct file_system_type shmem_fs_type = {
2360	.owner		= THIS_MODULE,
2361	.name		= "tmpfs",
2362	.mount		= shmem_mount,
2363	.kill_sb	= kill_litter_super,
2364};
2365
2366int __init shmem_init(void)
2367{
2368	int error;
2369
2370	error = bdi_init(&shmem_backing_dev_info);
2371	if (error)
2372		goto out4;
2373
2374	error = shmem_init_inodecache();
2375	if (error)
2376		goto out3;
2377
2378	error = register_filesystem(&shmem_fs_type);
2379	if (error) {
2380		printk(KERN_ERR "Could not register tmpfs\n");
2381		goto out2;
2382	}
2383
2384	shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
2385			shmem_fs_type.name, NULL);
2386	if (IS_ERR(shm_mnt)) {
2387		error = PTR_ERR(shm_mnt);
2388		printk(KERN_ERR "Could not kern_mount tmpfs\n");
2389		goto out1;
2390	}
2391	return 0;
2392
2393out1:
2394	unregister_filesystem(&shmem_fs_type);
2395out2:
2396	shmem_destroy_inodecache();
2397out3:
2398	bdi_destroy(&shmem_backing_dev_info);
2399out4:
2400	shm_mnt = ERR_PTR(error);
2401	return error;
2402}
2403
2404#else /* !CONFIG_SHMEM */
2405
2406/*
2407 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2408 *
2409 * This is intended for small systems where the benefits of the full
2410 * shmem code (swap-backed and resource-limited) are outweighed by
2411 * its complexity. On systems without swap this code should be
2412 * effectively equivalent, but much lighter weight.
2413 */
2414
2415#include <linux/ramfs.h>
2416
2417static struct file_system_type shmem_fs_type = {
2418	.name		= "tmpfs",
2419	.mount		= ramfs_mount,
2420	.kill_sb	= kill_litter_super,
2421};
2422
2423int __init shmem_init(void)
2424{
2425	BUG_ON(register_filesystem(&shmem_fs_type) != 0);
2426
2427	shm_mnt = kern_mount(&shmem_fs_type);
2428	BUG_ON(IS_ERR(shm_mnt));
2429
2430	return 0;
2431}
2432
2433int shmem_unuse(swp_entry_t swap, struct page *page)
2434{
2435	return 0;
2436}
2437
2438int shmem_lock(struct file *file, int lock, struct user_struct *user)
2439{
2440	return 0;
2441}
2442
2443void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
2444{
2445	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
2446}
2447EXPORT_SYMBOL_GPL(shmem_truncate_range);
2448
2449#define shmem_vm_ops				generic_file_vm_ops
2450#define shmem_file_operations			ramfs_file_operations
2451#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
2452#define shmem_acct_size(flags, size)		0
2453#define shmem_unacct_size(flags, size)		do {} while (0)
2454
2455#endif /* CONFIG_SHMEM */
2456
2457/* common code */
2458
2459/**
2460 * shmem_file_setup - get an unlinked file living in tmpfs
2461 * @name: name for dentry (to be seen in /proc/<pid>/maps)
2462 * @size: size to be set for the file
2463 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
2464 */
2465struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
2466{
2467	int error;
2468	struct file *file;
2469	struct inode *inode;
2470	struct path path;
2471	struct dentry *root;
2472	struct qstr this;
2473
2474	if (IS_ERR(shm_mnt))
2475		return (void *)shm_mnt;
2476
2477	if (size < 0 || size > MAX_LFS_FILESIZE)
2478		return ERR_PTR(-EINVAL);
2479
2480	if (shmem_acct_size(flags, size))
2481		return ERR_PTR(-ENOMEM);
2482
2483	error = -ENOMEM;
2484	this.name = name;
2485	this.len = strlen(name);
2486	this.hash = 0; /* will go */
2487	root = shm_mnt->mnt_root;
2488	path.dentry = d_alloc(root, &this);
2489	if (!path.dentry)
2490		goto put_memory;
2491	path.mnt = mntget(shm_mnt);
2492
2493	error = -ENOSPC;
2494	inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
2495	if (!inode)
2496		goto put_dentry;
2497
2498	d_instantiate(path.dentry, inode);
2499	inode->i_size = size;
2500	inode->i_nlink = 0;	/* It is unlinked */
2501#ifndef CONFIG_MMU
2502	error = ramfs_nommu_expand_for_mapping(inode, size);
2503	if (error)
2504		goto put_dentry;
2505#endif
2506
2507	error = -ENFILE;
2508	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
2509		  &shmem_file_operations);
2510	if (!file)
2511		goto put_dentry;
2512
2513	return file;
2514
2515put_dentry:
2516	path_put(&path);
2517put_memory:
2518	shmem_unacct_size(flags, size);
2519	return ERR_PTR(error);
2520}
2521EXPORT_SYMBOL_GPL(shmem_file_setup);
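
/*
 * A minimal sketch of in-kernel usage, loosely modelled on in-tree
 * callers such as the SysV IPC code; the wrapper name is illustrative.
 */
static struct file *example_create_backing_store(loff_t size)
{
	struct file *file;

	/* VM_NORESERVE: don't pre-account the whole size against swap */
	file = shmem_file_setup("example-buffer", size, VM_NORESERVE);
	if (IS_ERR(file))
		return file;	/* -EINVAL, -ENOMEM, -ENOSPC or -ENFILE */

	/* read/write via file->f_mapping; drop with fput() when done */
	return file;
}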
2522
2523/**
2524 * shmem_zero_setup - setup a shared anonymous mapping
2525 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
2526 */
2527int shmem_zero_setup(struct vm_area_struct *vma)
2528{
2529	struct file *file;
2530	loff_t size = vma->vm_end - vma->vm_start;
2531
2532	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2533	if (IS_ERR(file))
2534		return PTR_ERR(file);
2535
2536	if (vma->vm_file)
2537		fput(vma->vm_file);
2538	vma->vm_file = file;
2539	vma->vm_ops = &shmem_vm_ops;
2540	vma->vm_flags |= VM_CAN_NONLINEAR;
2541	return 0;
2542}
2543
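/*
 * shmem_zero_setup() is what backs a MAP_SHARED | MAP_ANONYMOUS mapping
 * with the unlinked "dev/zero" tmpfs file above, so forked children
 * share the same pages. A minimal userspace demonstration:
 */
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

static int shared_anon_example(void)
{
	int *p = mmap(NULL, sizeof(*p), PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return -1;

	if (fork() == 0) {
		*p = 42;	/* child writes through the shared page */
		_exit(0);
	}
	wait(NULL);
	return *p;		/* parent observes 42 */
}
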
2544/**
2545 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2546 * @mapping:	the page's address_space
2547 * @index:	the page index
2548 * @gfp:	the page allocator flags to use if allocating
2549 *
2550 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2551 * with any new page allocations done using the specified allocation flags.
2552 * But read_cache_page_gfp() uses the ->readpage() method: which does not
2553 * suit tmpfs, since it may have pages in swapcache, and needs to find those
2554 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2555 *
2556 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2557 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
2558 */
2559struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2560					 pgoff_t index, gfp_t gfp)
2561{
2562#ifdef CONFIG_SHMEM
2563	struct inode *inode = mapping->host;
2564	struct page *page;
2565	int error;
2566
2567	BUG_ON(mapping->a_ops != &shmem_aops);
2568	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2569	if (error)
2570		page = ERR_PTR(error);
2571	else
2572		unlock_page(page);
2573	return page;
2574#else
2575	/*
2576	 * The tiny !SHMEM case uses ramfs without swap
2577	 */
2578	return read_cache_page_gfp(mapping, index, gfp);
2579#endif
2580}
2581EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
2582
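/*
 * A sketch of a driver-side caller, loosely modelled on the i915
 * pattern described in the comment above; the function name is
 * illustrative. Mixing __GFP_NORETRY | __GFP_NOWARN into the mapping's
 * gfp mask lets an allocation fail quickly so the caller can shrink
 * its own caches and retry, instead of triggering the OOM killer.
 */
static struct page *example_get_backing_page(struct address_space *mapping,
					     pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, index, gfp);

	/* on success the page is uptodate, unlocked, with a reference held */
	return page;	/* or ERR_PTR(-ENOMEM): caller may reclaim and retry */
}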