shmem.c revision a786c06d9f2719203c00b3d97b21f9a96980d0b5
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/aio.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate and shmem_writepage communicate via inode->i_private
 * (with i_mutex making sure that it has only one user at a time):
 * we would prefer not to enlarge the shmem inode just for that.
 */
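/*
 * Lifecycle of this struct, as used below: shmem_fallocate() publishes
 * it in inode->i_private under i_lock for the duration of the
 * preallocation and clears it when done, while shmem_writepage() reads
 * it under i_lock to recognize pages of an in-flight fallocate.
 */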
struct shmem_falloc {
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}
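
/*
 * Note the asymmetry with shmem_acct_size() above: up-front size
 * accounting applies when VM_NORESERVE is clear, per-block accounting
 * when it is set, so each object is charged by exactly one scheme.
 */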

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	void **slot;
	unsigned int ret = 0;
	struct radix_tree_iter iter;

	if (!nr_pages)
		return 0;

	rcu_read_lock();
restart:
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page))
				goto restart;
			/*
			 * Otherwise, we must be storing a swap entry
			 * here as an exceptional entry: so return it
			 * without attempting to raise page count.
			 */
			goto export;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = iter.index;
		pages[ret] = page;
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		shmem_deswap_pagevec(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
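/*
 * The first pass below uses trylock_page() and simply skips any page it
 * cannot lock at once; the partial first and last pages are then zeroed
 * in place; and a final pass retries with lock_page() until the whole
 * range has been cleaned out.
 */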
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
							pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
			index = start;
			continue;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			shmem_deswap_pagevec(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that.  However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
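/*
 * Expected only under memory reclaim: note the wbc->for_reclaim check
 * below.  On success the page has been moved from page cache to swap
 * cache and handed to swap_writepage(); on failure it is redirtied
 * and, in the reclaim case, returned locked as AOP_WRITEPAGE_ACTIVATE.
 */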
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__set_page_locked(newpage);
	SetPageUptodate(newpage);
	SetPageSwapBacked(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								   newpage);
	if (!error) {
		__inc_zone_page_state(newpage, NR_FILE_PAGES);
		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	}
	spin_unlock_irq(&swap_mapping->tree_lock);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		mem_cgroup_replace_page_cache(oldpage, newpage);
		lru_cache_add_anon(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty.  That's up to the
 * vm.  If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap and page cache.
 */
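/*
 * Lookup order below: the page cache first (where a radix-tree
 * exceptional entry means the page is out on swap), then the swap
 * cache, then swapin readahead; failing all that, a fresh page is
 * allocated and charged to the memcg and to the sbinfo block counter
 * before being inserted.
 */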
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_page(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
			error = -EEXIST;	/* try again */
			goto unlock;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
			/*
			 * We already confirmed swap under page lock, and make
			 * no memory allocation here, so usually no possibility
			 * of error; but free_swap_and_cache() only trylocks a
			 * page, so it is just possible that the entry has been
			 * truncated or holepunched since swap was confirmed.
			 * shmem_undo_range() will have done some of the
			 * unaccounting, now delete_from_swap_cache() will do
			 * the rest (including mem_cgroup_uncharge_swapcache).
			 * Reset swap.val? No, leave it so "failed" goes back to
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error)
				delete_from_swap_cache(page);
		}
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		SetPageSwapBacked(page);
		__set_page_locked(page);
		error = mem_cgroup_cache_charge(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (error)
			goto decused;
		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
							gfp, NULL);
			radix_tree_preload_end();
		}
		if (error) {
			mem_cgroup_uncharge_cache_page(page);
			goto decused;
		}
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		alloced = true;

		/*
		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
		 */
		if (sgp == SGP_FALLOC)
			sgp = SGP_WRITE;
clear:
		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page;
		 * but SGP_FALLOC on a page fallocated earlier must initialize
		 * it now, lest undo on failure cancel our earlier guarantee.
		 */
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		if (alloced)
			goto trunc;
		else
			goto failed;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	info = SHMEM_I(inode);
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL &&
	    !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)	/* from above or from radix_tree_insert */
		goto repeat;
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int error;
	int ret = VM_FAULT_LOCKED;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;
	int error;
	ssize_t retval;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;
	struct iov_iter iter;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;
	iov_iter_init(&iter, iov, nr_segs, count, 0);

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (error) {
			if (error == -EINVAL)
				error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */
		ret = copy_page_to_iter(page, offset, nr, &iter);
		retval += ret;
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (!iov_iter_count(&iter))
			break;
		if (ret < nr) {
			error = -EFAULT;
			break;
		}
		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(file);
	return retval ? retval : error;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

/*
 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
 */
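/*
 * Note that "holes" here include fallocated-but-never-written pages:
 * shmem_seek_hole_data() treats a !PageUptodate page as a hole, which
 * is how lseek(SEEK_DATA/SEEK_HOLE) should see preallocated space.
 */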
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
					pgoff_t index, pgoff_t end, int whence)
{
	struct page *page;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool done = false;
	int i;

	pagevec_init(&pvec, 0);
	pvec.nr = 1;		/* start small: we may be there already */
	while (!done) {
		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
					pvec.nr, pvec.pages, indices);
		if (!pvec.nr) {
			if (whence == SEEK_DATA)
				index = end;
			break;
		}
		for (i = 0; i < pvec.nr; i++, index++) {
			if (index < indices[i]) {
				if (whence == SEEK_HOLE) {
					done = true;
					break;
				}
				index = indices[i];
			}
			page = pvec.pages[i];
			if (page && !radix_tree_exceptional_entry(page)) {
				if (!PageUptodate(page))
					page = NULL;
			}
			if (index >= end ||
			    (page && whence == SEEK_DATA) ||
			    (!page && whence == SEEK_HOLE)) {
				done = true;
				break;
			}
		}
		shmem_deswap_pagevec(&pvec);
		pagevec_release(&pvec);
		pvec.nr = PAGEVEC_SIZE;
		cond_resched();
	}
	return index;
}

static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t start, end;
	loff_t new_offset;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	mutex_lock(&inode->i_mutex);
	/* We're holding i_mutex so we can access i_size directly */

	if (offset < 0)
		offset = -EINVAL;
	else if (offset >= inode->i_size)
		offset = -ENXIO;
	else {
		start = offset >> PAGE_CACHE_SHIFT;
		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
		new_offset <<= PAGE_CACHE_SHIFT;
		if (new_offset > offset) {
			if (new_offset < inode->i_size)
				offset = new_offset;
			else if (whence == SEEK_DATA)
				offset = -ENXIO;
			else
				offset = inode->i_size;
		}
	}

	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	mutex_unlock(&inode->i_mutex);
	return offset;
}

static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	start = offset >> PAGE_CACHE_SHIFT;
	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
									NULL);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			shmem_undo_range(inode,
				(loff_t)start << PAGE_CACHE_SHIFT,
				(loff_t)index << PAGE_CACHE_SHIFT, true);
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;

		error = 0;
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
out_iput:
	iput(inode);
	return error;
}

static int
shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     NULL,
						     shmem_initxattrs, NULL);
		if (error && error != -EOPNOTSUPP)
			goto out_iput;
		error = simple_acl_create(dir, inode);
		if (error)
			goto out_iput;
		d_tmpfile(dentry, inode);
	}
	return error;
out_iput:
	iput(inode);
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool excl)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename;
 * we just have to decrement the usage count for the target if
 * it exists, so that the VFS layer correctly frees it when it
 * gets overwritten.
/*
 * The VFS layer already does all the dentry stuff for rename;
 * we just have to decrement the usage count for the target if
 * it exists, so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len - 1;
	if (len <= SHORT_SYMLINK_LEN) {
		info->symlink = kmemdup(symname, len, GFP_KERNEL);
		if (!info->symlink) {
			iput(inode);
			return -ENOMEM;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr);
		SetPageUptodate(page);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}
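/*
 * Example (illustrative, not part of this file): shmem_symlink() above
 * picks one of two representations.  A target whose length including the
 * trailing NUL is at most SHORT_SYMLINK_LEN (128) lives in a kmalloc'ed
 * buffer that shmem_follow_short_symlink() hands back directly; a longer
 * target is copied into a swappable page via shmem_getpage().  So, from
 * userspace (paths hypothetical):
 *
 *	symlink("short-target", "/dev/shm/s");	// inline, kmalloc'ed copy
 *	symlink(very_long_target, "/dev/shm/l");// page-backed, can be
 *						// swapped out under pressure
 *
 * Either way, readlink(2) sees i_size == strlen(target), as set above.
 */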
#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array,
			    void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	const struct xattr *xattr;
	struct simple_xattr *new_xattr;
	size_t len;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			return -ENOMEM;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		simple_xattr_list_add(&info->xattrs, new_xattr);
	}

	return 0;
}

static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	NULL
};

static int shmem_xattr_validate(const char *name)
{
	struct { const char *prefix; size_t len; } arr[] = {
		{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
		{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		size_t preflen = arr[i].len;
		if (strncmp(name, arr[i].prefix, preflen) == 0) {
			if (!name[preflen])
				return -EINVAL;
			return 0;
		}
	}
	return -EOPNOTSUPP;
}

static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
			      void *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace, use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_getxattr(dentry, name, buffer, size);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_get(&info->xattrs, name, buffer, size);
}

static int shmem_setxattr(struct dentry *dentry, const char *name,
			  const void *value, size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace, use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_setxattr(dentry, name, value, size, flags);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
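/*
 * Example (illustrative, not part of this file): the effect of
 * shmem_xattr_validate() as seen from userspace, on a file living on a
 * tmpfs mount built with CONFIG_TMPFS_XATTR (assumes <sys/xattr.h>):
 *
 *	setxattr(path, "trusted.foo", "v", 1, 0);   // ok (needs CAP_SYS_ADMIN)
 *	setxattr(path, "security.foo", "v", 1, 0);  // ok, kept in info->xattrs
 *	setxattr(path, "security.", "v", 1, 0);     // -1, errno == EINVAL:
 *						    // bare prefix, no suffix
 *	setxattr(path, "user.foo", "v", 1, 0);      // -1, errno == ENOTSUP
 *
 * system.* names (the POSIX ACL attributes) bypass validation entirely and
 * are resolved through sb->s_xattr, as the comments above describe.
 */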
static int shmem_removexattr(struct dentry *dentry, const char *name)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	int err;

	/*
	 * If this is a request for a synthetic attribute in the system.*
	 * namespace, use the generic infrastructure to resolve a handler
	 * for it via sb->s_xattr.
	 */
	if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return generic_removexattr(dentry, name);

	err = shmem_xattr_validate(name);
	if (err)
		return err;

	return simple_xattr_remove(&info->xattrs, name);
}

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
	return simple_xattr_list(&info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			 shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
{
	if (*len < 3) {
		*len = 3;
		return FILEID_INVALID;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so, as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once.
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
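/*
 * Example (illustrative, not part of this file): the handle layout shared
 * by shmem_encode_fh() and shmem_fh_to_dentry() above packs the 32-bit
 * generation and a possibly-64-bit inode number into three __u32 words:
 *
 *	fh[0] = i_generation;
 *	fh[1] = i_ino & 0xffffffff;	// low half
 *	fh[2] = (__u64)i_ino >> 32;	// high half, 0 on 32-bit ino
 *
 * so a decoder recovers the inode number as:
 *
 *	__u64 inum = ((__u64)fh[2] << 32) | fh[1];
 *
 * and the ilookup5() key is (unsigned long)(inum + fh[0]), matching the
 * value passed to __insert_inode_hash() above.
 */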
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;
	struct mempolicy *mpol = NULL;
	uid_t uid;
	gid_t gid;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			       "tmpfs: No value for mount option '%s'\n",
			       this_char);
			goto error;
		}

		if (!strcmp(this_char, "size")) {
			unsigned long long size;
			size = memparse(value, &rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char, "nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "uid")) {
			if (remount)
				continue;
			uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->uid = make_kuid(current_user_ns(), uid);
			if (!uid_valid(sbinfo->uid))
				goto bad_val;
		} else if (!strcmp(this_char, "gid")) {
			if (remount)
				continue;
			gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
			sbinfo->gid = make_kgid(current_user_ns(), gid);
			if (!gid_valid(sbinfo->gid))
				goto bad_val;
		} else if (!strcmp(this_char, "mpol")) {
			mpol_put(mpol);
			mpol = NULL;
			if (mpol_parse_str(value, &mpol))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			goto error;
		}
	}
	sbinfo->mpol = mpol;
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
error:
	mpol_put(mpol);
	return 1;
}
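/*
 * Example (illustrative, not part of this file): option strings accepted
 * by shmem_parse_options() above, as typically passed via mount(8):
 *
 *	mount -t tmpfs -o size=512m,nr_inodes=10k,mode=1777 tmpfs /mnt
 *	mount -t tmpfs -o size=50% tmpfs /mnt
 *
 * For "size=50%", the arithmetic above works out to
 *
 *	size = 50;
 *	size <<= PAGE_SHIFT;		// 50 * PAGE_SIZE
 *	size *= totalram_pages;
 *	do_div(size, 100);		// totalram_pages * PAGE_SIZE / 2 bytes
 *	max_blocks = DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
 *					// = totalram_pages / 2
 *
 * i.e. half the pages of physical RAM.  The isdigit() dance in the
 * tokenizer is what lets an option like "mpol=bind:0,2" keep the comma
 * inside its nodelist instead of splitting on it.
 */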
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long inodes;
	int error = -EINVAL;

	config.mpol = NULL;
	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests disallow limited->unlimited while any are in use;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	/*
	 * Preserve previous mempolicy unless mpol remount option was specified.
	 */
	if (config.mpol) {
		mpol_put(sbinfo->mpol);
		sbinfo->mpol = config.mpol;	/* transfers initial ref */
	}
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			   sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03ho", sbinfo->mode);
	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(seq, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(seq, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}
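/*
 * Example (illustrative, not part of this file): consequences of the
 * checks in shmem_remount_fs() above, from the command line:
 *
 *	mount -o remount,size=2g /mnt	# ok while <= 2g is in use
 *	mount -o remount,size=0 /mnt	# limited -> unlimited: fails while
 *					# any blocks are in use
 *	mount -o remount,size=1g /mnt	# unlimited -> limited: always fails,
 *					# since there is no record of usage
 *
 * uid, gid and mode are deliberately skipped on remount (the
 * "if (remount) continue" cases in the parser), so they keep their
 * mount-time values.
 */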
int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * By default we allow only half of the physical RAM per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_KERNMOUNT)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	} else {
		sb->s_flags |= MS_NOUSER;
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= MS_NOSEC;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0))
		goto failed;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_destroy_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	call_rcu(&inode->i_rcu, shmem_destroy_callback);
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static int shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, shmem_init_inode);
	return 0;
}

static void shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
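/*
 * Example (illustrative, not part of this file): shmem_init_inode() above
 * is a slab constructor, so inode_init_once() runs once per object when
 * its slab page is created, not on every allocation.  The pattern in
 * miniature, for a hypothetical cache of struct foo:
 *
 *	static void foo_ctor(void *obj)
 *	{
 *		struct foo *f = obj;
 *		INIT_LIST_HEAD(&f->list);	// state that survives
 *						// free/realloc cycles
 *	}
 *
 *	cache = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
 *				  SLAB_PANIC, foo_ctor);
 *
 * SLAB_PANIC makes cache-creation failure panic, which is why
 * shmem_init_inodecache() can unconditionally return 0.
 */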
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
	.tmpfile	= shmem_tmpfile,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
	.setxattr	= shmem_setxattr,
	.getxattr	= shmem_getxattr,
	.listxattr	= shmem_listxattr,
	.removexattr	= shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
	.remap_pages	= generic_file_remap_pages,
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.mount		= shmem_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	int error;

	/* If rootfs called this, don't re-init */
	if (shmem_inode_cachep)
		return 0;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = shmem_init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
	shmem_destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
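/*
 * Example (illustrative, not part of this file): the shmem_vm_ops fault
 * path above is what services an ordinary mmap of a tmpfs file.  A hedged
 * userspace sketch (assumes a tmpfs mount at /dev/shm):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/shm/buf", O_CREAT | O_RDWR, 0600);
 *	ftruncate(fd, 1 << 20);		// sparse: no pages allocated yet
 *	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	// first touch faults into shmem_fault(), which
 *			// allocates and accounts a page via shmem_getpage()
 */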
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * its complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.mount		= ramfs_mount,
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT,
};

int __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t swap, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

#endif /* CONFIG_SHMEM */

/* common code */

static struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

static struct file *__shmem_file_setup(const char *name, loff_t size,
				       unsigned long flags, unsigned int i_flags)
{
	struct file *res;
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return ERR_CAST(shm_mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	res = ERR_PTR(-ENOMEM);
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	sb = shm_mnt->mnt_sb;
	path.dentry = d_alloc_pseudo(sb, &this);
	if (!path.dentry)
		goto put_memory;
	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(shm_mnt);

	res = ERR_PTR(-ENOSPC);
	inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	inode->i_flags |= i_flags;
	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (IS_ERR(res))
		goto put_dentry;

	res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			 &shmem_file_operations);
	if (IS_ERR(res))
		goto put_dentry;

	return res;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return res;
}
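/*
 * Example (illustrative, not part of this file): a hedged sketch of how a
 * kernel-side caller might obtain an unlinked tmpfs file from the helper
 * above (hypothetical driver code, error handling trimmed):
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("my_driver_buf", SZ_1M, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	// ... use filp as backing store ...
 *	fput(filp);	// drop the reference when done
 *
 * The name only ever shows up in /proc/<pid>/maps; the file is unlinked
 * from birth (clear_nlink() above), so it vanishes on the last fput().
 */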
/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must do LSM checks at a
 *	higher layer.  The one user is the big_key implementation.  LSM checks
 *	are provided at the key level rather than the inode level.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(name, size, flags, S_PRIVATE);
}

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct page *page;
	int error;

	BUG_ON(mapping->a_ops != &shmem_aops);
	error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
	if (error)
		page = ERR_PTR(error);
	else
		unlock_page(page);
	return page;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
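/*
 * Example (illustrative, not part of this file): a hedged sketch of the
 * drm-style usage described in the comment above, reading object pages
 * from a shmem mapping without OOMing on failure (hypothetical caller,
 * obj_filp assumed to be a shmem-backed struct file):
 *
 *	struct address_space *mapping = file_inode(obj_filp)->i_mapping;
 *	gfp_t gfp = mapping_gfp_mask(mapping) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);	// e.g. -ENOMEM under pressure
 *	// ... use the page ...
 *	page_cache_release(page);	// drop the reference taken above
 */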