shmem.c revision 250297edf83292c831fbf4504df54953c2aacfe4
1/* 2 * Resizable virtual memory filesystem for Linux. 3 * 4 * Copyright (C) 2000 Linus Torvalds. 5 * 2000 Transmeta Corp. 6 * 2000-2001 Christoph Rohland 7 * 2000-2001 SAP AG 8 * 2002 Red Hat Inc. 9 * Copyright (C) 2002-2011 Hugh Dickins. 10 * Copyright (C) 2011 Google Inc. 11 * Copyright (C) 2002-2005 VERITAS Software Corporation. 12 * Copyright (C) 2004 Andi Kleen, SuSE Labs 13 * 14 * Extended attribute support for tmpfs: 15 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net> 16 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com> 17 * 18 * tiny-shmem: 19 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com> 20 * 21 * This file is released under the GPL. 22 */ 23 24#include <linux/fs.h> 25#include <linux/init.h> 26#include <linux/vfs.h> 27#include <linux/mount.h> 28#include <linux/ramfs.h> 29#include <linux/pagemap.h> 30#include <linux/file.h> 31#include <linux/mm.h> 32#include <linux/export.h> 33#include <linux/swap.h> 34 35static struct vfsmount *shm_mnt; 36 37#ifdef CONFIG_SHMEM 38/* 39 * This virtual memory filesystem is heavily based on the ramfs. It 40 * extends ramfs by the ability to use swap and honor resource limits 41 * which makes it a completely usable filesystem. 42 */ 43 44#include <linux/xattr.h> 45#include <linux/exportfs.h> 46#include <linux/posix_acl.h> 47#include <linux/generic_acl.h> 48#include <linux/mman.h> 49#include <linux/string.h> 50#include <linux/slab.h> 51#include <linux/backing-dev.h> 52#include <linux/shmem_fs.h> 53#include <linux/writeback.h> 54#include <linux/blkdev.h> 55#include <linux/pagevec.h> 56#include <linux/percpu_counter.h> 57#include <linux/falloc.h> 58#include <linux/splice.h> 59#include <linux/security.h> 60#include <linux/swapops.h> 61#include <linux/mempolicy.h> 62#include <linux/namei.h> 63#include <linux/ctype.h> 64#include <linux/migrate.h> 65#include <linux/highmem.h> 66#include <linux/seq_file.h> 67#include <linux/magic.h> 68 69#include <asm/uaccess.h> 70#include <asm/pgtable.h> 71 72#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) 73#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) 74 75/* Pretend that each entry is of this size in directory's i_size */ 76#define BOGO_DIRENT_SIZE 20 77 78/* Symlink up to this size is kmalloc'ed instead of using a swappable page */ 79#define SHORT_SYMLINK_LEN 128 80 81/* 82 * shmem_fallocate and shmem_writepage communicate via inode->i_private 83 * (with i_mutex making sure that it has only one user at a time): 84 * we would prefer not to enlarge the shmem inode just for that. 
85 */ 86struct shmem_falloc { 87 pgoff_t start; /* start of range currently being fallocated */ 88 pgoff_t next; /* the next page offset to be fallocated */ 89 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 90 pgoff_t nr_unswapped; /* how often writepage refused to swap out */ 91}; 92 93/* Flag allocation requirements to shmem_getpage */ 94enum sgp_type { 95 SGP_READ, /* don't exceed i_size, don't allocate page */ 96 SGP_CACHE, /* don't exceed i_size, may allocate page */ 97 SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */ 98 SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ 99 SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ 100}; 101 102#ifdef CONFIG_TMPFS 103static unsigned long shmem_default_max_blocks(void) 104{ 105 return totalram_pages / 2; 106} 107 108static unsigned long shmem_default_max_inodes(void) 109{ 110 return min(totalram_pages - totalhigh_pages, totalram_pages / 2); 111} 112#endif 113 114static bool shmem_should_replace_page(struct page *page, gfp_t gfp); 115static int shmem_replace_page(struct page **pagep, gfp_t gfp, 116 struct shmem_inode_info *info, pgoff_t index); 117static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 118 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type); 119 120static inline int shmem_getpage(struct inode *inode, pgoff_t index, 121 struct page **pagep, enum sgp_type sgp, int *fault_type) 122{ 123 return shmem_getpage_gfp(inode, index, pagep, sgp, 124 mapping_gfp_mask(inode->i_mapping), fault_type); 125} 126 127static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb) 128{ 129 return sb->s_fs_info; 130} 131 132/* 133 * shmem_file_setup pre-accounts the whole fixed size of a VM object, 134 * for shared memory and for shared anonymous (/dev/zero) mappings 135 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1), 136 * consistent with the pre-accounting of private mappings ... 137 */ 138static inline int shmem_acct_size(unsigned long flags, loff_t size) 139{ 140 return (flags & VM_NORESERVE) ? 141 0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size)); 142} 143 144static inline void shmem_unacct_size(unsigned long flags, loff_t size) 145{ 146 if (!(flags & VM_NORESERVE)) 147 vm_unacct_memory(VM_ACCT(size)); 148} 149 150/* 151 * ... whereas tmpfs objects are accounted incrementally as 152 * pages are allocated, in order to allow huge sparse files. 153 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM, 154 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM. 155 */ 156static inline int shmem_acct_block(unsigned long flags) 157{ 158 return (flags & VM_NORESERVE) ? 
159 security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0; 160} 161 162static inline void shmem_unacct_blocks(unsigned long flags, long pages) 163{ 164 if (flags & VM_NORESERVE) 165 vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); 166} 167 168static const struct super_operations shmem_ops; 169static const struct address_space_operations shmem_aops; 170static const struct file_operations shmem_file_operations; 171static const struct inode_operations shmem_inode_operations; 172static const struct inode_operations shmem_dir_inode_operations; 173static const struct inode_operations shmem_special_inode_operations; 174static const struct vm_operations_struct shmem_vm_ops; 175 176static struct backing_dev_info shmem_backing_dev_info __read_mostly = { 177 .ra_pages = 0, /* No readahead */ 178 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED, 179}; 180 181static LIST_HEAD(shmem_swaplist); 182static DEFINE_MUTEX(shmem_swaplist_mutex); 183 184static int shmem_reserve_inode(struct super_block *sb) 185{ 186 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 187 if (sbinfo->max_inodes) { 188 spin_lock(&sbinfo->stat_lock); 189 if (!sbinfo->free_inodes) { 190 spin_unlock(&sbinfo->stat_lock); 191 return -ENOSPC; 192 } 193 sbinfo->free_inodes--; 194 spin_unlock(&sbinfo->stat_lock); 195 } 196 return 0; 197} 198 199static void shmem_free_inode(struct super_block *sb) 200{ 201 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 202 if (sbinfo->max_inodes) { 203 spin_lock(&sbinfo->stat_lock); 204 sbinfo->free_inodes++; 205 spin_unlock(&sbinfo->stat_lock); 206 } 207} 208 209/** 210 * shmem_recalc_inode - recalculate the block usage of an inode 211 * @inode: inode to recalc 212 * 213 * We have to calculate the free blocks since the mm can drop 214 * undirtied hole pages behind our back. 215 * 216 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped 217 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) 218 * 219 * It has to be called with the spinlock held. 220 */ 221static void shmem_recalc_inode(struct inode *inode) 222{ 223 struct shmem_inode_info *info = SHMEM_I(inode); 224 long freed; 225 226 freed = info->alloced - info->swapped - inode->i_mapping->nrpages; 227 if (freed > 0) { 228 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 229 if (sbinfo->max_blocks) 230 percpu_counter_add(&sbinfo->used_blocks, -freed); 231 info->alloced -= freed; 232 inode->i_blocks -= freed * BLOCKS_PER_PAGE; 233 shmem_unacct_blocks(info->flags, freed); 234 } 235} 236 237/* 238 * Replace item expected in radix tree by a new item, while holding tree lock. 239 */ 240static int shmem_radix_tree_replace(struct address_space *mapping, 241 pgoff_t index, void *expected, void *replacement) 242{ 243 void **pslot; 244 void *item = NULL; 245 246 VM_BUG_ON(!expected); 247 pslot = radix_tree_lookup_slot(&mapping->page_tree, index); 248 if (pslot) 249 item = radix_tree_deref_slot_protected(pslot, 250 &mapping->tree_lock); 251 if (item != expected) 252 return -ENOENT; 253 if (replacement) 254 radix_tree_replace_slot(pslot, replacement); 255 else 256 radix_tree_delete(&mapping->page_tree, index); 257 return 0; 258} 259 260/* 261 * Sometimes, before we decide whether to proceed or to fail, we must check 262 * that an entry was not already brought back from swap by a racing thread. 263 * 264 * Checking page is not enough: by the time a SwapCache page is locked, it 265 * might be reused, and again be SwapCache, using the same swap as before. 
266 */ 267static bool shmem_confirm_swap(struct address_space *mapping, 268 pgoff_t index, swp_entry_t swap) 269{ 270 void *item; 271 272 rcu_read_lock(); 273 item = radix_tree_lookup(&mapping->page_tree, index); 274 rcu_read_unlock(); 275 return item == swp_to_radix_entry(swap); 276} 277 278/* 279 * Like add_to_page_cache_locked, but error if expected item has gone. 280 */ 281static int shmem_add_to_page_cache(struct page *page, 282 struct address_space *mapping, 283 pgoff_t index, gfp_t gfp, void *expected) 284{ 285 int error; 286 287 VM_BUG_ON(!PageLocked(page)); 288 VM_BUG_ON(!PageSwapBacked(page)); 289 290 page_cache_get(page); 291 page->mapping = mapping; 292 page->index = index; 293 294 spin_lock_irq(&mapping->tree_lock); 295 if (!expected) 296 error = radix_tree_insert(&mapping->page_tree, index, page); 297 else 298 error = shmem_radix_tree_replace(mapping, index, expected, 299 page); 300 if (!error) { 301 mapping->nrpages++; 302 __inc_zone_page_state(page, NR_FILE_PAGES); 303 __inc_zone_page_state(page, NR_SHMEM); 304 spin_unlock_irq(&mapping->tree_lock); 305 } else { 306 page->mapping = NULL; 307 spin_unlock_irq(&mapping->tree_lock); 308 page_cache_release(page); 309 } 310 return error; 311} 312 313/* 314 * Like delete_from_page_cache, but substitutes swap for page. 315 */ 316static void shmem_delete_from_page_cache(struct page *page, void *radswap) 317{ 318 struct address_space *mapping = page->mapping; 319 int error; 320 321 spin_lock_irq(&mapping->tree_lock); 322 error = shmem_radix_tree_replace(mapping, page->index, page, radswap); 323 page->mapping = NULL; 324 mapping->nrpages--; 325 __dec_zone_page_state(page, NR_FILE_PAGES); 326 __dec_zone_page_state(page, NR_SHMEM); 327 spin_unlock_irq(&mapping->tree_lock); 328 page_cache_release(page); 329 BUG_ON(error); 330} 331 332/* 333 * Like find_get_pages, but collecting swap entries as well as pages. 334 */ 335static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping, 336 pgoff_t start, unsigned int nr_pages, 337 struct page **pages, pgoff_t *indices) 338{ 339 void **slot; 340 unsigned int ret = 0; 341 struct radix_tree_iter iter; 342 343 if (!nr_pages) 344 return 0; 345 346 rcu_read_lock(); 347restart: 348 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 349 struct page *page; 350repeat: 351 page = radix_tree_deref_slot(slot); 352 if (unlikely(!page)) 353 continue; 354 if (radix_tree_exception(page)) { 355 if (radix_tree_deref_retry(page)) 356 goto restart; 357 /* 358 * Otherwise, we must be storing a swap entry 359 * here as an exceptional entry: so return it 360 * without attempting to raise page count. 361 */ 362 goto export; 363 } 364 if (!page_cache_get_speculative(page)) 365 goto repeat; 366 367 /* Has the page moved? */ 368 if (unlikely(page != *slot)) { 369 page_cache_release(page); 370 goto repeat; 371 } 372export: 373 indices[ret] = iter.index; 374 pages[ret] = page; 375 if (++ret == nr_pages) 376 break; 377 } 378 rcu_read_unlock(); 379 return ret; 380} 381 382/* 383 * Remove swap entry from radix tree, free the swap and its page cache. 
384 */ 385static int shmem_free_swap(struct address_space *mapping, 386 pgoff_t index, void *radswap) 387{ 388 int error; 389 390 spin_lock_irq(&mapping->tree_lock); 391 error = shmem_radix_tree_replace(mapping, index, radswap, NULL); 392 spin_unlock_irq(&mapping->tree_lock); 393 if (!error) 394 free_swap_and_cache(radix_to_swp_entry(radswap)); 395 return error; 396} 397 398/* 399 * Pagevec may contain swap entries, so shuffle up pages before releasing. 400 */ 401static void shmem_deswap_pagevec(struct pagevec *pvec) 402{ 403 int i, j; 404 405 for (i = 0, j = 0; i < pagevec_count(pvec); i++) { 406 struct page *page = pvec->pages[i]; 407 if (!radix_tree_exceptional_entry(page)) 408 pvec->pages[j++] = page; 409 } 410 pvec->nr = j; 411} 412 413/* 414 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists. 415 */ 416void shmem_unlock_mapping(struct address_space *mapping) 417{ 418 struct pagevec pvec; 419 pgoff_t indices[PAGEVEC_SIZE]; 420 pgoff_t index = 0; 421 422 pagevec_init(&pvec, 0); 423 /* 424 * Minor point, but we might as well stop if someone else SHM_LOCKs it. 425 */ 426 while (!mapping_unevictable(mapping)) { 427 /* 428 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it 429 * has finished, if it hits a row of PAGEVEC_SIZE swap entries. 430 */ 431 pvec.nr = shmem_find_get_pages_and_swap(mapping, index, 432 PAGEVEC_SIZE, pvec.pages, indices); 433 if (!pvec.nr) 434 break; 435 index = indices[pvec.nr - 1] + 1; 436 shmem_deswap_pagevec(&pvec); 437 check_move_unevictable_pages(pvec.pages, pvec.nr); 438 pagevec_release(&pvec); 439 cond_resched(); 440 } 441} 442 443/* 444 * Remove range of pages and swap entries from radix tree, and free them. 445 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate. 
446 */ 447static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, 448 bool unfalloc) 449{ 450 struct address_space *mapping = inode->i_mapping; 451 struct shmem_inode_info *info = SHMEM_I(inode); 452 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 453 pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT; 454 unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1); 455 unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); 456 struct pagevec pvec; 457 pgoff_t indices[PAGEVEC_SIZE]; 458 long nr_swaps_freed = 0; 459 pgoff_t index; 460 int i; 461 462 if (lend == -1) 463 end = -1; /* unsigned, so actually very big */ 464 465 pagevec_init(&pvec, 0); 466 index = start; 467 while (index < end) { 468 pvec.nr = shmem_find_get_pages_and_swap(mapping, index, 469 min(end - index, (pgoff_t)PAGEVEC_SIZE), 470 pvec.pages, indices); 471 if (!pvec.nr) 472 break; 473 mem_cgroup_uncharge_start(); 474 for (i = 0; i < pagevec_count(&pvec); i++) { 475 struct page *page = pvec.pages[i]; 476 477 index = indices[i]; 478 if (index >= end) 479 break; 480 481 if (radix_tree_exceptional_entry(page)) { 482 if (unfalloc) 483 continue; 484 nr_swaps_freed += !shmem_free_swap(mapping, 485 index, page); 486 continue; 487 } 488 489 if (!trylock_page(page)) 490 continue; 491 if (!unfalloc || !PageUptodate(page)) { 492 if (page->mapping == mapping) { 493 VM_BUG_ON(PageWriteback(page)); 494 truncate_inode_page(mapping, page); 495 } 496 } 497 unlock_page(page); 498 } 499 shmem_deswap_pagevec(&pvec); 500 pagevec_release(&pvec); 501 mem_cgroup_uncharge_end(); 502 cond_resched(); 503 index++; 504 } 505 506 if (partial_start) { 507 struct page *page = NULL; 508 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL); 509 if (page) { 510 unsigned int top = PAGE_CACHE_SIZE; 511 if (start > end) { 512 top = partial_end; 513 partial_end = 0; 514 } 515 zero_user_segment(page, partial_start, top); 516 set_page_dirty(page); 517 unlock_page(page); 518 page_cache_release(page); 519 } 520 } 521 if (partial_end) { 522 struct page *page = NULL; 523 shmem_getpage(inode, end, &page, SGP_READ, NULL); 524 if (page) { 525 zero_user_segment(page, 0, partial_end); 526 set_page_dirty(page); 527 unlock_page(page); 528 page_cache_release(page); 529 } 530 } 531 if (start >= end) 532 return; 533 534 index = start; 535 for ( ; ; ) { 536 cond_resched(); 537 pvec.nr = shmem_find_get_pages_and_swap(mapping, index, 538 min(end - index, (pgoff_t)PAGEVEC_SIZE), 539 pvec.pages, indices); 540 if (!pvec.nr) { 541 if (index == start || unfalloc) 542 break; 543 index = start; 544 continue; 545 } 546 if ((index == start || unfalloc) && indices[0] >= end) { 547 shmem_deswap_pagevec(&pvec); 548 pagevec_release(&pvec); 549 break; 550 } 551 mem_cgroup_uncharge_start(); 552 for (i = 0; i < pagevec_count(&pvec); i++) { 553 struct page *page = pvec.pages[i]; 554 555 index = indices[i]; 556 if (index >= end) 557 break; 558 559 if (radix_tree_exceptional_entry(page)) { 560 if (unfalloc) 561 continue; 562 nr_swaps_freed += !shmem_free_swap(mapping, 563 index, page); 564 continue; 565 } 566 567 lock_page(page); 568 if (!unfalloc || !PageUptodate(page)) { 569 if (page->mapping == mapping) { 570 VM_BUG_ON(PageWriteback(page)); 571 truncate_inode_page(mapping, page); 572 } 573 } 574 unlock_page(page); 575 } 576 shmem_deswap_pagevec(&pvec); 577 pagevec_release(&pvec); 578 mem_cgroup_uncharge_end(); 579 index++; 580 } 581 582 spin_lock(&info->lock); 583 info->swapped -= nr_swaps_freed; 584 shmem_recalc_inode(inode); 585 
spin_unlock(&info->lock); 586} 587 588void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 589{ 590 shmem_undo_range(inode, lstart, lend, false); 591 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 592} 593EXPORT_SYMBOL_GPL(shmem_truncate_range); 594 595static int shmem_setattr(struct dentry *dentry, struct iattr *attr) 596{ 597 struct inode *inode = dentry->d_inode; 598 int error; 599 600 error = inode_change_ok(inode, attr); 601 if (error) 602 return error; 603 604 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 605 loff_t oldsize = inode->i_size; 606 loff_t newsize = attr->ia_size; 607 608 if (newsize != oldsize) { 609 i_size_write(inode, newsize); 610 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 611 } 612 if (newsize < oldsize) { 613 loff_t holebegin = round_up(newsize, PAGE_SIZE); 614 unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); 615 shmem_truncate_range(inode, newsize, (loff_t)-1); 616 /* unmap again to remove racily COWed private pages */ 617 unmap_mapping_range(inode->i_mapping, holebegin, 0, 1); 618 } 619 } 620 621 setattr_copy(inode, attr); 622#ifdef CONFIG_TMPFS_POSIX_ACL 623 if (attr->ia_valid & ATTR_MODE) 624 error = generic_acl_chmod(inode); 625#endif 626 return error; 627} 628 629static void shmem_evict_inode(struct inode *inode) 630{ 631 struct shmem_inode_info *info = SHMEM_I(inode); 632 633 if (inode->i_mapping->a_ops == &shmem_aops) { 634 shmem_unacct_size(info->flags, inode->i_size); 635 inode->i_size = 0; 636 shmem_truncate_range(inode, 0, (loff_t)-1); 637 if (!list_empty(&info->swaplist)) { 638 mutex_lock(&shmem_swaplist_mutex); 639 list_del_init(&info->swaplist); 640 mutex_unlock(&shmem_swaplist_mutex); 641 } 642 } else 643 kfree(info->symlink); 644 645 simple_xattrs_free(&info->xattrs); 646 WARN_ON(inode->i_blocks); 647 shmem_free_inode(inode->i_sb); 648 clear_inode(inode); 649} 650 651/* 652 * If swap found in inode, free it and move page from swapcache to filecache. 653 */ 654static int shmem_unuse_inode(struct shmem_inode_info *info, 655 swp_entry_t swap, struct page **pagep) 656{ 657 struct address_space *mapping = info->vfs_inode.i_mapping; 658 void *radswap; 659 pgoff_t index; 660 gfp_t gfp; 661 int error = 0; 662 663 radswap = swp_to_radix_entry(swap); 664 index = radix_tree_locate_item(&mapping->page_tree, radswap); 665 if (index == -1) 666 return 0; 667 668 /* 669 * Move _head_ to start search for next from here. 670 * But be careful: shmem_evict_inode checks list_empty without taking 671 * mutex, and there's an instant in list_move_tail when info->swaplist 672 * would appear empty, if it were the only one on shmem_swaplist. 673 */ 674 if (shmem_swaplist.next != &info->swaplist) 675 list_move_tail(&shmem_swaplist, &info->swaplist); 676 677 gfp = mapping_gfp_mask(mapping); 678 if (shmem_should_replace_page(*pagep, gfp)) { 679 mutex_unlock(&shmem_swaplist_mutex); 680 error = shmem_replace_page(pagep, gfp, info, index); 681 mutex_lock(&shmem_swaplist_mutex); 682 /* 683 * We needed to drop mutex to make that restrictive page 684 * allocation, but the inode might have been freed while we 685 * dropped it: although a racing shmem_evict_inode() cannot 686 * complete without emptying the radix_tree, our page lock 687 * on this swapcache page is not enough to prevent that - 688 * free_swap_and_cache() of our swap entry will only 689 * trylock_page(), removing swap from radix_tree whatever. 
690 * 691 * We must not proceed to shmem_add_to_page_cache() if the 692 * inode has been freed, but of course we cannot rely on 693 * inode or mapping or info to check that. However, we can 694 * safely check if our swap entry is still in use (and here 695 * it can't have got reused for another page): if it's still 696 * in use, then the inode cannot have been freed yet, and we 697 * can safely proceed (if it's no longer in use, that tells 698 * nothing about the inode, but we don't need to unuse swap). 699 */ 700 if (!page_swapcount(*pagep)) 701 error = -ENOENT; 702 } 703 704 /* 705 * We rely on shmem_swaplist_mutex, not only to protect the swaplist, 706 * but also to hold up shmem_evict_inode(): so inode cannot be freed 707 * beneath us (pagelock doesn't help until the page is in pagecache). 708 */ 709 if (!error) 710 error = shmem_add_to_page_cache(*pagep, mapping, index, 711 GFP_NOWAIT, radswap); 712 if (error != -ENOMEM) { 713 /* 714 * Truncation and eviction use free_swap_and_cache(), which 715 * only does trylock page: if we raced, best clean up here. 716 */ 717 delete_from_swap_cache(*pagep); 718 set_page_dirty(*pagep); 719 if (!error) { 720 spin_lock(&info->lock); 721 info->swapped--; 722 spin_unlock(&info->lock); 723 swap_free(swap); 724 } 725 error = 1; /* not an error, but entry was found */ 726 } 727 return error; 728} 729 730/* 731 * Search through swapped inodes to find and replace swap by page. 732 */ 733int shmem_unuse(swp_entry_t swap, struct page *page) 734{ 735 struct list_head *this, *next; 736 struct shmem_inode_info *info; 737 int found = 0; 738 int error = 0; 739 740 /* 741 * There's a faint possibility that swap page was replaced before 742 * caller locked it: caller will come back later with the right page. 743 */ 744 if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) 745 goto out; 746 747 /* 748 * Charge page using GFP_KERNEL while we can wait, before taking 749 * the shmem_swaplist_mutex which might hold up shmem_writepage(). 750 * Charged back to the user (not to caller) when swap account is used. 751 */ 752 error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL); 753 if (error) 754 goto out; 755 /* No radix_tree_preload: swap entry keeps a place for page in tree */ 756 757 mutex_lock(&shmem_swaplist_mutex); 758 list_for_each_safe(this, next, &shmem_swaplist) { 759 info = list_entry(this, struct shmem_inode_info, swaplist); 760 if (info->swapped) 761 found = shmem_unuse_inode(info, swap, &page); 762 else 763 list_del_init(&info->swaplist); 764 cond_resched(); 765 if (found) 766 break; 767 } 768 mutex_unlock(&shmem_swaplist_mutex); 769 770 if (found < 0) 771 error = found; 772out: 773 unlock_page(page); 774 page_cache_release(page); 775 return error; 776} 777 778/* 779 * Move the page from the page cache to the swap cache. 
780 */ 781static int shmem_writepage(struct page *page, struct writeback_control *wbc) 782{ 783 struct shmem_inode_info *info; 784 struct address_space *mapping; 785 struct inode *inode; 786 swp_entry_t swap; 787 pgoff_t index; 788 789 BUG_ON(!PageLocked(page)); 790 mapping = page->mapping; 791 index = page->index; 792 inode = mapping->host; 793 info = SHMEM_I(inode); 794 if (info->flags & VM_LOCKED) 795 goto redirty; 796 if (!total_swap_pages) 797 goto redirty; 798 799 /* 800 * shmem_backing_dev_info's capabilities prevent regular writeback or 801 * sync from ever calling shmem_writepage; but a stacking filesystem 802 * might use ->writepage of its underlying filesystem, in which case 803 * tmpfs should write out to swap only in response to memory pressure, 804 * and not for the writeback threads or sync. 805 */ 806 if (!wbc->for_reclaim) { 807 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 808 goto redirty; 809 } 810 811 /* 812 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 813 * value into swapfile.c, the only way we can correctly account for a 814 * fallocated page arriving here is now to initialize it and write it. 815 * 816 * That's okay for a page already fallocated earlier, but if we have 817 * not yet completed the fallocation, then (a) we want to keep track 818 * of this page in case we have to undo it, and (b) it may not be a 819 * good idea to continue anyway, once we're pushing into swap. So 820 * reactivate the page, and let shmem_fallocate() quit when too many. 821 */ 822 if (!PageUptodate(page)) { 823 if (inode->i_private) { 824 struct shmem_falloc *shmem_falloc; 825 spin_lock(&inode->i_lock); 826 shmem_falloc = inode->i_private; 827 if (shmem_falloc && 828 index >= shmem_falloc->start && 829 index < shmem_falloc->next) 830 shmem_falloc->nr_unswapped++; 831 else 832 shmem_falloc = NULL; 833 spin_unlock(&inode->i_lock); 834 if (shmem_falloc) 835 goto redirty; 836 } 837 clear_highpage(page); 838 flush_dcache_page(page); 839 SetPageUptodate(page); 840 } 841 842 swap = get_swap_page(); 843 if (!swap.val) 844 goto redirty; 845 846 /* 847 * Add inode to shmem_unuse()'s list of swapped-out inodes, 848 * if it's not already there. Do it now before the page is 849 * moved to swap cache, when its pagelock no longer protects 850 * the inode from eviction. But don't unlock the mutex until 851 * we've incremented swapped, because shmem_unuse_inode() will 852 * prune a !swapped inode from the swaplist under this mutex. 
853 */ 854 mutex_lock(&shmem_swaplist_mutex); 855 if (list_empty(&info->swaplist)) 856 list_add_tail(&info->swaplist, &shmem_swaplist); 857 858 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 859 swap_shmem_alloc(swap); 860 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 861 862 spin_lock(&info->lock); 863 info->swapped++; 864 shmem_recalc_inode(inode); 865 spin_unlock(&info->lock); 866 867 mutex_unlock(&shmem_swaplist_mutex); 868 BUG_ON(page_mapped(page)); 869 swap_writepage(page, wbc); 870 return 0; 871 } 872 873 mutex_unlock(&shmem_swaplist_mutex); 874 swapcache_free(swap, NULL); 875redirty: 876 set_page_dirty(page); 877 if (wbc->for_reclaim) 878 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 879 unlock_page(page); 880 return 0; 881} 882 883#ifdef CONFIG_NUMA 884#ifdef CONFIG_TMPFS 885static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 886{ 887 char buffer[64]; 888 889 if (!mpol || mpol->mode == MPOL_DEFAULT) 890 return; /* show nothing */ 891 892 mpol_to_str(buffer, sizeof(buffer), mpol); 893 894 seq_printf(seq, ",mpol=%s", buffer); 895} 896 897static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 898{ 899 struct mempolicy *mpol = NULL; 900 if (sbinfo->mpol) { 901 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 902 mpol = sbinfo->mpol; 903 mpol_get(mpol); 904 spin_unlock(&sbinfo->stat_lock); 905 } 906 return mpol; 907} 908#endif /* CONFIG_TMPFS */ 909 910static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 911 struct shmem_inode_info *info, pgoff_t index) 912{ 913 struct vm_area_struct pvma; 914 struct page *page; 915 916 /* Create a pseudo vma that just contains the policy */ 917 pvma.vm_start = 0; 918 /* Bias interleave by inode number to distribute better across nodes */ 919 pvma.vm_pgoff = index + info->vfs_inode.i_ino; 920 pvma.vm_ops = NULL; 921 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); 922 923 page = swapin_readahead(swap, gfp, &pvma, 0); 924 925 /* Drop reference taken by mpol_shared_policy_lookup() */ 926 mpol_cond_put(pvma.vm_policy); 927 928 return page; 929} 930 931static struct page *shmem_alloc_page(gfp_t gfp, 932 struct shmem_inode_info *info, pgoff_t index) 933{ 934 struct vm_area_struct pvma; 935 struct page *page; 936 937 /* Create a pseudo vma that just contains the policy */ 938 pvma.vm_start = 0; 939 /* Bias interleave by inode number to distribute better across nodes */ 940 pvma.vm_pgoff = index + info->vfs_inode.i_ino; 941 pvma.vm_ops = NULL; 942 pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); 943 944 page = alloc_page_vma(gfp, &pvma, 0); 945 946 /* Drop reference taken by mpol_shared_policy_lookup() */ 947 mpol_cond_put(pvma.vm_policy); 948 949 return page; 950} 951#else /* !CONFIG_NUMA */ 952#ifdef CONFIG_TMPFS 953static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 954{ 955} 956#endif /* CONFIG_TMPFS */ 957 958static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 959 struct shmem_inode_info *info, pgoff_t index) 960{ 961 return swapin_readahead(swap, gfp, NULL, 0); 962} 963 964static inline struct page *shmem_alloc_page(gfp_t gfp, 965 struct shmem_inode_info *info, pgoff_t index) 966{ 967 return alloc_page(gfp); 968} 969#endif /* CONFIG_NUMA */ 970 971#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS) 972static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 973{ 974 return NULL; 975} 976#endif 977 978/* 979 * When a page is moved from swapcache to shmem 
filecache (either by the 980 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 981 * shmem_unuse_inode()), it may have been read in earlier from swap, in 982 * ignorance of the mapping it belongs to. If that mapping has special 983 * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 984 * we may need to copy to a suitable page before moving to filecache. 985 * 986 * In a future release, this may well be extended to respect cpuset and 987 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 988 * but for now it is a simple matter of zone. 989 */ 990static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 991{ 992 return page_zonenum(page) > gfp_zone(gfp); 993} 994 995static int shmem_replace_page(struct page **pagep, gfp_t gfp, 996 struct shmem_inode_info *info, pgoff_t index) 997{ 998 struct page *oldpage, *newpage; 999 struct address_space *swap_mapping; 1000 pgoff_t swap_index; 1001 int error; 1002 1003 oldpage = *pagep; 1004 swap_index = page_private(oldpage); 1005 swap_mapping = page_mapping(oldpage); 1006 1007 /* 1008 * We have arrived here because our zones are constrained, so don't 1009 * limit chance of success by further cpuset and node constraints. 1010 */ 1011 gfp &= ~GFP_CONSTRAINT_MASK; 1012 newpage = shmem_alloc_page(gfp, info, index); 1013 if (!newpage) 1014 return -ENOMEM; 1015 1016 page_cache_get(newpage); 1017 copy_highpage(newpage, oldpage); 1018 flush_dcache_page(newpage); 1019 1020 __set_page_locked(newpage); 1021 SetPageUptodate(newpage); 1022 SetPageSwapBacked(newpage); 1023 set_page_private(newpage, swap_index); 1024 SetPageSwapCache(newpage); 1025 1026 /* 1027 * Our caller will very soon move newpage out of swapcache, but it's 1028 * a nice clean interface for us to replace oldpage by newpage there. 1029 */ 1030 spin_lock_irq(&swap_mapping->tree_lock); 1031 error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, 1032 newpage); 1033 if (!error) { 1034 __inc_zone_page_state(newpage, NR_FILE_PAGES); 1035 __dec_zone_page_state(oldpage, NR_FILE_PAGES); 1036 } 1037 spin_unlock_irq(&swap_mapping->tree_lock); 1038 1039 if (unlikely(error)) { 1040 /* 1041 * Is this possible? I think not, now that our callers check 1042 * both PageSwapCache and page_private after getting page lock; 1043 * but be defensive. Reverse old to newpage for clear and free. 1044 */ 1045 oldpage = newpage; 1046 } else { 1047 mem_cgroup_replace_page_cache(oldpage, newpage); 1048 lru_cache_add_anon(newpage); 1049 *pagep = newpage; 1050 } 1051 1052 ClearPageSwapCache(oldpage); 1053 set_page_private(oldpage, 0); 1054 1055 unlock_page(oldpage); 1056 page_cache_release(oldpage); 1057 page_cache_release(oldpage); 1058 return error; 1059} 1060 1061/* 1062 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 1063 * 1064 * If we allocate a new one we do not mark it dirty. That's up to the 1065 * vm. 
If we swap it in we mark it dirty since we also free the swap 1066 * entry since a page cannot live in both the swap and page cache 1067 */ 1068static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 1069 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type) 1070{ 1071 struct address_space *mapping = inode->i_mapping; 1072 struct shmem_inode_info *info; 1073 struct shmem_sb_info *sbinfo; 1074 struct page *page; 1075 swp_entry_t swap; 1076 int error; 1077 int once = 0; 1078 int alloced = 0; 1079 1080 if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) 1081 return -EFBIG; 1082repeat: 1083 swap.val = 0; 1084 page = find_lock_page(mapping, index); 1085 if (radix_tree_exceptional_entry(page)) { 1086 swap = radix_to_swp_entry(page); 1087 page = NULL; 1088 } 1089 1090 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1091 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1092 error = -EINVAL; 1093 goto failed; 1094 } 1095 1096 /* fallocated page? */ 1097 if (page && !PageUptodate(page)) { 1098 if (sgp != SGP_READ) 1099 goto clear; 1100 unlock_page(page); 1101 page_cache_release(page); 1102 page = NULL; 1103 } 1104 if (page || (sgp == SGP_READ && !swap.val)) { 1105 *pagep = page; 1106 return 0; 1107 } 1108 1109 /* 1110 * Fast cache lookup did not find it: 1111 * bring it back from swap or allocate. 1112 */ 1113 info = SHMEM_I(inode); 1114 sbinfo = SHMEM_SB(inode->i_sb); 1115 1116 if (swap.val) { 1117 /* Look it up and read it in.. */ 1118 page = lookup_swap_cache(swap); 1119 if (!page) { 1120 /* here we actually do the io */ 1121 if (fault_type) 1122 *fault_type |= VM_FAULT_MAJOR; 1123 page = shmem_swapin(swap, gfp, info, index); 1124 if (!page) { 1125 error = -ENOMEM; 1126 goto failed; 1127 } 1128 } 1129 1130 /* We have to do this with page locked to prevent races */ 1131 lock_page(page); 1132 if (!PageSwapCache(page) || page_private(page) != swap.val || 1133 !shmem_confirm_swap(mapping, index, swap)) { 1134 error = -EEXIST; /* try again */ 1135 goto unlock; 1136 } 1137 if (!PageUptodate(page)) { 1138 error = -EIO; 1139 goto failed; 1140 } 1141 wait_on_page_writeback(page); 1142 1143 if (shmem_should_replace_page(page, gfp)) { 1144 error = shmem_replace_page(&page, gfp, info, index); 1145 if (error) 1146 goto failed; 1147 } 1148 1149 error = mem_cgroup_cache_charge(page, current->mm, 1150 gfp & GFP_RECLAIM_MASK); 1151 if (!error) { 1152 error = shmem_add_to_page_cache(page, mapping, index, 1153 gfp, swp_to_radix_entry(swap)); 1154 /* 1155 * We already confirmed swap under page lock, and make 1156 * no memory allocation here, so usually no possibility 1157 * of error; but free_swap_and_cache() only trylocks a 1158 * page, so it is just possible that the entry has been 1159 * truncated or holepunched since swap was confirmed. 1160 * shmem_undo_range() will have done some of the 1161 * unaccounting, now delete_from_swap_cache() will do 1162 * the rest (including mem_cgroup_uncharge_swapcache). 1163 * Reset swap.val? No, leave it so "failed" goes back to 1164 * "repeat": reading a hole and writing should succeed. 
1165 */ 1166 if (error) 1167 delete_from_swap_cache(page); 1168 } 1169 if (error) 1170 goto failed; 1171 1172 spin_lock(&info->lock); 1173 info->swapped--; 1174 shmem_recalc_inode(inode); 1175 spin_unlock(&info->lock); 1176 1177 delete_from_swap_cache(page); 1178 set_page_dirty(page); 1179 swap_free(swap); 1180 1181 } else { 1182 if (shmem_acct_block(info->flags)) { 1183 error = -ENOSPC; 1184 goto failed; 1185 } 1186 if (sbinfo->max_blocks) { 1187 if (percpu_counter_compare(&sbinfo->used_blocks, 1188 sbinfo->max_blocks) >= 0) { 1189 error = -ENOSPC; 1190 goto unacct; 1191 } 1192 percpu_counter_inc(&sbinfo->used_blocks); 1193 } 1194 1195 page = shmem_alloc_page(gfp, info, index); 1196 if (!page) { 1197 error = -ENOMEM; 1198 goto decused; 1199 } 1200 1201 SetPageSwapBacked(page); 1202 __set_page_locked(page); 1203 error = mem_cgroup_cache_charge(page, current->mm, 1204 gfp & GFP_RECLAIM_MASK); 1205 if (error) 1206 goto decused; 1207 error = radix_tree_preload(gfp & GFP_RECLAIM_MASK); 1208 if (!error) { 1209 error = shmem_add_to_page_cache(page, mapping, index, 1210 gfp, NULL); 1211 radix_tree_preload_end(); 1212 } 1213 if (error) { 1214 mem_cgroup_uncharge_cache_page(page); 1215 goto decused; 1216 } 1217 lru_cache_add_anon(page); 1218 1219 spin_lock(&info->lock); 1220 info->alloced++; 1221 inode->i_blocks += BLOCKS_PER_PAGE; 1222 shmem_recalc_inode(inode); 1223 spin_unlock(&info->lock); 1224 alloced = true; 1225 1226 /* 1227 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 1228 */ 1229 if (sgp == SGP_FALLOC) 1230 sgp = SGP_WRITE; 1231clear: 1232 /* 1233 * Let SGP_WRITE caller clear ends if write does not fill page; 1234 * but SGP_FALLOC on a page fallocated earlier must initialize 1235 * it now, lest undo on failure cancel our earlier guarantee. 1236 */ 1237 if (sgp != SGP_WRITE) { 1238 clear_highpage(page); 1239 flush_dcache_page(page); 1240 SetPageUptodate(page); 1241 } 1242 if (sgp == SGP_DIRTY) 1243 set_page_dirty(page); 1244 } 1245 1246 /* Perhaps the file has been truncated since we checked */ 1247 if (sgp != SGP_WRITE && sgp != SGP_FALLOC && 1248 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1249 error = -EINVAL; 1250 if (alloced) 1251 goto trunc; 1252 else 1253 goto failed; 1254 } 1255 *pagep = page; 1256 return 0; 1257 1258 /* 1259 * Error recovery. 
1260 */ 1261trunc: 1262 info = SHMEM_I(inode); 1263 ClearPageDirty(page); 1264 delete_from_page_cache(page); 1265 spin_lock(&info->lock); 1266 info->alloced--; 1267 inode->i_blocks -= BLOCKS_PER_PAGE; 1268 spin_unlock(&info->lock); 1269decused: 1270 sbinfo = SHMEM_SB(inode->i_sb); 1271 if (sbinfo->max_blocks) 1272 percpu_counter_add(&sbinfo->used_blocks, -1); 1273unacct: 1274 shmem_unacct_blocks(info->flags, 1); 1275failed: 1276 if (swap.val && error != -EINVAL && 1277 !shmem_confirm_swap(mapping, index, swap)) 1278 error = -EEXIST; 1279unlock: 1280 if (page) { 1281 unlock_page(page); 1282 page_cache_release(page); 1283 } 1284 if (error == -ENOSPC && !once++) { 1285 info = SHMEM_I(inode); 1286 spin_lock(&info->lock); 1287 shmem_recalc_inode(inode); 1288 spin_unlock(&info->lock); 1289 goto repeat; 1290 } 1291 if (error == -EEXIST) /* from above or from radix_tree_insert */ 1292 goto repeat; 1293 return error; 1294} 1295 1296static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1297{ 1298 struct inode *inode = file_inode(vma->vm_file); 1299 int error; 1300 int ret = VM_FAULT_LOCKED; 1301 1302 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 1303 if (error) 1304 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); 1305 1306 if (ret & VM_FAULT_MAJOR) { 1307 count_vm_event(PGMAJFAULT); 1308 mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); 1309 } 1310 return ret; 1311} 1312 1313#ifdef CONFIG_NUMA 1314static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 1315{ 1316 struct inode *inode = file_inode(vma->vm_file); 1317 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 1318} 1319 1320static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 1321 unsigned long addr) 1322{ 1323 struct inode *inode = file_inode(vma->vm_file); 1324 pgoff_t index; 1325 1326 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 1327 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 1328} 1329#endif 1330 1331int shmem_lock(struct file *file, int lock, struct user_struct *user) 1332{ 1333 struct inode *inode = file_inode(file); 1334 struct shmem_inode_info *info = SHMEM_I(inode); 1335 int retval = -ENOMEM; 1336 1337 spin_lock(&info->lock); 1338 if (lock && !(info->flags & VM_LOCKED)) { 1339 if (!user_shm_lock(inode->i_size, user)) 1340 goto out_nomem; 1341 info->flags |= VM_LOCKED; 1342 mapping_set_unevictable(file->f_mapping); 1343 } 1344 if (!lock && (info->flags & VM_LOCKED) && user) { 1345 user_shm_unlock(inode->i_size, user); 1346 info->flags &= ~VM_LOCKED; 1347 mapping_clear_unevictable(file->f_mapping); 1348 } 1349 retval = 0; 1350 1351out_nomem: 1352 spin_unlock(&info->lock); 1353 return retval; 1354} 1355 1356static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 1357{ 1358 file_accessed(file); 1359 vma->vm_ops = &shmem_vm_ops; 1360 return 0; 1361} 1362 1363static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 1364 umode_t mode, dev_t dev, unsigned long flags) 1365{ 1366 struct inode *inode; 1367 struct shmem_inode_info *info; 1368 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 1369 1370 if (shmem_reserve_inode(sb)) 1371 return NULL; 1372 1373 inode = new_inode(sb); 1374 if (inode) { 1375 inode->i_ino = get_next_ino(); 1376 inode_init_owner(inode, dir, mode); 1377 inode->i_blocks = 0; 1378 inode->i_mapping->backing_dev_info = &shmem_backing_dev_info; 1379 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 1380 
inode->i_generation = get_seconds(); 1381 info = SHMEM_I(inode); 1382 memset(info, 0, (char *)inode - (char *)info); 1383 spin_lock_init(&info->lock); 1384 info->flags = flags & VM_NORESERVE; 1385 INIT_LIST_HEAD(&info->swaplist); 1386 simple_xattrs_init(&info->xattrs); 1387 cache_no_acl(inode); 1388 1389 switch (mode & S_IFMT) { 1390 default: 1391 inode->i_op = &shmem_special_inode_operations; 1392 init_special_inode(inode, mode, dev); 1393 break; 1394 case S_IFREG: 1395 inode->i_mapping->a_ops = &shmem_aops; 1396 inode->i_op = &shmem_inode_operations; 1397 inode->i_fop = &shmem_file_operations; 1398 mpol_shared_policy_init(&info->policy, 1399 shmem_get_sbmpol(sbinfo)); 1400 break; 1401 case S_IFDIR: 1402 inc_nlink(inode); 1403 /* Some things misbehave if size == 0 on a directory */ 1404 inode->i_size = 2 * BOGO_DIRENT_SIZE; 1405 inode->i_op = &shmem_dir_inode_operations; 1406 inode->i_fop = &simple_dir_operations; 1407 break; 1408 case S_IFLNK: 1409 /* 1410 * Must not load anything in the rbtree, 1411 * mpol_free_shared_policy will not be called. 1412 */ 1413 mpol_shared_policy_init(&info->policy, NULL); 1414 break; 1415 } 1416 } else 1417 shmem_free_inode(sb); 1418 return inode; 1419} 1420 1421#ifdef CONFIG_TMPFS 1422static const struct inode_operations shmem_symlink_inode_operations; 1423static const struct inode_operations shmem_short_symlink_operations; 1424 1425#ifdef CONFIG_TMPFS_XATTR 1426static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 1427#else 1428#define shmem_initxattrs NULL 1429#endif 1430 1431static int 1432shmem_write_begin(struct file *file, struct address_space *mapping, 1433 loff_t pos, unsigned len, unsigned flags, 1434 struct page **pagep, void **fsdata) 1435{ 1436 struct inode *inode = mapping->host; 1437 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1438 return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); 1439} 1440 1441static int 1442shmem_write_end(struct file *file, struct address_space *mapping, 1443 loff_t pos, unsigned len, unsigned copied, 1444 struct page *page, void *fsdata) 1445{ 1446 struct inode *inode = mapping->host; 1447 1448 if (pos + copied > inode->i_size) 1449 i_size_write(inode, pos + copied); 1450 1451 if (!PageUptodate(page)) { 1452 if (copied < PAGE_CACHE_SIZE) { 1453 unsigned from = pos & (PAGE_CACHE_SIZE - 1); 1454 zero_user_segments(page, 0, from, 1455 from + copied, PAGE_CACHE_SIZE); 1456 } 1457 SetPageUptodate(page); 1458 } 1459 set_page_dirty(page); 1460 unlock_page(page); 1461 page_cache_release(page); 1462 1463 return copied; 1464} 1465 1466static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor) 1467{ 1468 struct inode *inode = file_inode(filp); 1469 struct address_space *mapping = inode->i_mapping; 1470 pgoff_t index; 1471 unsigned long offset; 1472 enum sgp_type sgp = SGP_READ; 1473 1474 /* 1475 * Might this read be for a stacking filesystem? Then when reading 1476 * holes of a sparse file, we actually need to allocate those pages, 1477 * and even mark them dirty, so it cannot exceed the max_blocks limit. 
1478 */ 1479 if (segment_eq(get_fs(), KERNEL_DS)) 1480 sgp = SGP_DIRTY; 1481 1482 index = *ppos >> PAGE_CACHE_SHIFT; 1483 offset = *ppos & ~PAGE_CACHE_MASK; 1484 1485 for (;;) { 1486 struct page *page = NULL; 1487 pgoff_t end_index; 1488 unsigned long nr, ret; 1489 loff_t i_size = i_size_read(inode); 1490 1491 end_index = i_size >> PAGE_CACHE_SHIFT; 1492 if (index > end_index) 1493 break; 1494 if (index == end_index) { 1495 nr = i_size & ~PAGE_CACHE_MASK; 1496 if (nr <= offset) 1497 break; 1498 } 1499 1500 desc->error = shmem_getpage(inode, index, &page, sgp, NULL); 1501 if (desc->error) { 1502 if (desc->error == -EINVAL) 1503 desc->error = 0; 1504 break; 1505 } 1506 if (page) 1507 unlock_page(page); 1508 1509 /* 1510 * We must evaluate after, since reads (unlike writes) 1511 * are called without i_mutex protection against truncate 1512 */ 1513 nr = PAGE_CACHE_SIZE; 1514 i_size = i_size_read(inode); 1515 end_index = i_size >> PAGE_CACHE_SHIFT; 1516 if (index == end_index) { 1517 nr = i_size & ~PAGE_CACHE_MASK; 1518 if (nr <= offset) { 1519 if (page) 1520 page_cache_release(page); 1521 break; 1522 } 1523 } 1524 nr -= offset; 1525 1526 if (page) { 1527 /* 1528 * If users can be writing to this page using arbitrary 1529 * virtual addresses, take care about potential aliasing 1530 * before reading the page on the kernel side. 1531 */ 1532 if (mapping_writably_mapped(mapping)) 1533 flush_dcache_page(page); 1534 /* 1535 * Mark the page accessed if we read the beginning. 1536 */ 1537 if (!offset) 1538 mark_page_accessed(page); 1539 } else { 1540 page = ZERO_PAGE(0); 1541 page_cache_get(page); 1542 } 1543 1544 /* 1545 * Ok, we have the page, and it's up-to-date, so 1546 * now we can copy it to user space... 1547 * 1548 * The actor routine returns how many bytes were actually used.. 1549 * NOTE! This may not be the same as how much of a user buffer 1550 * we filled up (we may be padding etc), so we can only update 1551 * "pos" here (the actor routine has to update the user buffer 1552 * pointers and the remaining count). 
1553 */ 1554 ret = actor(desc, page, offset, nr); 1555 offset += ret; 1556 index += offset >> PAGE_CACHE_SHIFT; 1557 offset &= ~PAGE_CACHE_MASK; 1558 1559 page_cache_release(page); 1560 if (ret != nr || !desc->count) 1561 break; 1562 1563 cond_resched(); 1564 } 1565 1566 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; 1567 file_accessed(filp); 1568} 1569 1570static ssize_t shmem_file_aio_read(struct kiocb *iocb, 1571 const struct iovec *iov, unsigned long nr_segs, loff_t pos) 1572{ 1573 struct file *filp = iocb->ki_filp; 1574 ssize_t retval; 1575 unsigned long seg; 1576 size_t count; 1577 loff_t *ppos = &iocb->ki_pos; 1578 1579 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE); 1580 if (retval) 1581 return retval; 1582 1583 for (seg = 0; seg < nr_segs; seg++) { 1584 read_descriptor_t desc; 1585 1586 desc.written = 0; 1587 desc.arg.buf = iov[seg].iov_base; 1588 desc.count = iov[seg].iov_len; 1589 if (desc.count == 0) 1590 continue; 1591 desc.error = 0; 1592 do_shmem_file_read(filp, ppos, &desc, file_read_actor); 1593 retval += desc.written; 1594 if (desc.error) { 1595 retval = retval ?: desc.error; 1596 break; 1597 } 1598 if (desc.count > 0) 1599 break; 1600 } 1601 return retval; 1602} 1603 1604static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, 1605 struct pipe_inode_info *pipe, size_t len, 1606 unsigned int flags) 1607{ 1608 struct address_space *mapping = in->f_mapping; 1609 struct inode *inode = mapping->host; 1610 unsigned int loff, nr_pages, req_pages; 1611 struct page *pages[PIPE_DEF_BUFFERS]; 1612 struct partial_page partial[PIPE_DEF_BUFFERS]; 1613 struct page *page; 1614 pgoff_t index, end_index; 1615 loff_t isize, left; 1616 int error, page_nr; 1617 struct splice_pipe_desc spd = { 1618 .pages = pages, 1619 .partial = partial, 1620 .nr_pages_max = PIPE_DEF_BUFFERS, 1621 .flags = flags, 1622 .ops = &page_cache_pipe_buf_ops, 1623 .spd_release = spd_release_page, 1624 }; 1625 1626 isize = i_size_read(inode); 1627 if (unlikely(*ppos >= isize)) 1628 return 0; 1629 1630 left = isize - *ppos; 1631 if (unlikely(left < len)) 1632 len = left; 1633 1634 if (splice_grow_spd(pipe, &spd)) 1635 return -ENOMEM; 1636 1637 index = *ppos >> PAGE_CACHE_SHIFT; 1638 loff = *ppos & ~PAGE_CACHE_MASK; 1639 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1640 nr_pages = min(req_pages, pipe->buffers); 1641 1642 spd.nr_pages = find_get_pages_contig(mapping, index, 1643 nr_pages, spd.pages); 1644 index += spd.nr_pages; 1645 error = 0; 1646 1647 while (spd.nr_pages < nr_pages) { 1648 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL); 1649 if (error) 1650 break; 1651 unlock_page(page); 1652 spd.pages[spd.nr_pages++] = page; 1653 index++; 1654 } 1655 1656 index = *ppos >> PAGE_CACHE_SHIFT; 1657 nr_pages = spd.nr_pages; 1658 spd.nr_pages = 0; 1659 1660 for (page_nr = 0; page_nr < nr_pages; page_nr++) { 1661 unsigned int this_len; 1662 1663 if (!len) 1664 break; 1665 1666 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); 1667 page = spd.pages[page_nr]; 1668 1669 if (!PageUptodate(page) || page->mapping != mapping) { 1670 error = shmem_getpage(inode, index, &page, 1671 SGP_CACHE, NULL); 1672 if (error) 1673 break; 1674 unlock_page(page); 1675 page_cache_release(spd.pages[page_nr]); 1676 spd.pages[page_nr] = page; 1677 } 1678 1679 isize = i_size_read(inode); 1680 end_index = (isize - 1) >> PAGE_CACHE_SHIFT; 1681 if (unlikely(!isize || index > end_index)) 1682 break; 1683 1684 if (end_index == index) { 1685 unsigned int plen; 1686 
1687 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; 1688 if (plen <= loff) 1689 break; 1690 1691 this_len = min(this_len, plen - loff); 1692 len = this_len; 1693 } 1694 1695 spd.partial[page_nr].offset = loff; 1696 spd.partial[page_nr].len = this_len; 1697 len -= this_len; 1698 loff = 0; 1699 spd.nr_pages++; 1700 index++; 1701 } 1702 1703 while (page_nr < nr_pages) 1704 page_cache_release(spd.pages[page_nr++]); 1705 1706 if (spd.nr_pages) 1707 error = splice_to_pipe(pipe, &spd); 1708 1709 splice_shrink_spd(&spd); 1710 1711 if (error > 0) { 1712 *ppos += error; 1713 file_accessed(in); 1714 } 1715 return error; 1716} 1717 1718/* 1719 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree. 1720 */ 1721static pgoff_t shmem_seek_hole_data(struct address_space *mapping, 1722 pgoff_t index, pgoff_t end, int whence) 1723{ 1724 struct page *page; 1725 struct pagevec pvec; 1726 pgoff_t indices[PAGEVEC_SIZE]; 1727 bool done = false; 1728 int i; 1729 1730 pagevec_init(&pvec, 0); 1731 pvec.nr = 1; /* start small: we may be there already */ 1732 while (!done) { 1733 pvec.nr = shmem_find_get_pages_and_swap(mapping, index, 1734 pvec.nr, pvec.pages, indices); 1735 if (!pvec.nr) { 1736 if (whence == SEEK_DATA) 1737 index = end; 1738 break; 1739 } 1740 for (i = 0; i < pvec.nr; i++, index++) { 1741 if (index < indices[i]) { 1742 if (whence == SEEK_HOLE) { 1743 done = true; 1744 break; 1745 } 1746 index = indices[i]; 1747 } 1748 page = pvec.pages[i]; 1749 if (page && !radix_tree_exceptional_entry(page)) { 1750 if (!PageUptodate(page)) 1751 page = NULL; 1752 } 1753 if (index >= end || 1754 (page && whence == SEEK_DATA) || 1755 (!page && whence == SEEK_HOLE)) { 1756 done = true; 1757 break; 1758 } 1759 } 1760 shmem_deswap_pagevec(&pvec); 1761 pagevec_release(&pvec); 1762 pvec.nr = PAGEVEC_SIZE; 1763 cond_resched(); 1764 } 1765 return index; 1766} 1767 1768static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 1769{ 1770 struct address_space *mapping = file->f_mapping; 1771 struct inode *inode = mapping->host; 1772 pgoff_t start, end; 1773 loff_t new_offset; 1774 1775 if (whence != SEEK_DATA && whence != SEEK_HOLE) 1776 return generic_file_llseek_size(file, offset, whence, 1777 MAX_LFS_FILESIZE, i_size_read(inode)); 1778 mutex_lock(&inode->i_mutex); 1779 /* We're holding i_mutex so we can access i_size directly */ 1780 1781 if (offset < 0) 1782 offset = -EINVAL; 1783 else if (offset >= inode->i_size) 1784 offset = -ENXIO; 1785 else { 1786 start = offset >> PAGE_CACHE_SHIFT; 1787 end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1788 new_offset = shmem_seek_hole_data(mapping, start, end, whence); 1789 new_offset <<= PAGE_CACHE_SHIFT; 1790 if (new_offset > offset) { 1791 if (new_offset < inode->i_size) 1792 offset = new_offset; 1793 else if (whence == SEEK_DATA) 1794 offset = -ENXIO; 1795 else 1796 offset = inode->i_size; 1797 } 1798 } 1799 1800 if (offset >= 0 && offset != file->f_pos) { 1801 file->f_pos = offset; 1802 file->f_version = 0; 1803 } 1804 mutex_unlock(&inode->i_mutex); 1805 return offset; 1806} 1807 1808static long shmem_fallocate(struct file *file, int mode, loff_t offset, 1809 loff_t len) 1810{ 1811 struct inode *inode = file_inode(file); 1812 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1813 struct shmem_falloc shmem_falloc; 1814 pgoff_t start, index, end; 1815 int error; 1816 1817 mutex_lock(&inode->i_mutex); 1818 1819 if (mode & FALLOC_FL_PUNCH_HOLE) { 1820 struct address_space *mapping = file->f_mapping; 1821 loff_t unmap_start = round_up(offset, 
PAGE_SIZE); 1822 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 1823 1824 if ((u64)unmap_end > (u64)unmap_start) 1825 unmap_mapping_range(mapping, unmap_start, 1826 1 + unmap_end - unmap_start, 0); 1827 shmem_truncate_range(inode, offset, offset + len - 1); 1828 /* No need to unmap again: hole-punching leaves COWed pages */ 1829 error = 0; 1830 goto out; 1831 } 1832 1833 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 1834 error = inode_newsize_ok(inode, offset + len); 1835 if (error) 1836 goto out; 1837 1838 start = offset >> PAGE_CACHE_SHIFT; 1839 end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 1840 /* Try to avoid a swapstorm if len is impossible to satisfy */ 1841 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 1842 error = -ENOSPC; 1843 goto out; 1844 } 1845 1846 shmem_falloc.start = start; 1847 shmem_falloc.next = start; 1848 shmem_falloc.nr_falloced = 0; 1849 shmem_falloc.nr_unswapped = 0; 1850 spin_lock(&inode->i_lock); 1851 inode->i_private = &shmem_falloc; 1852 spin_unlock(&inode->i_lock); 1853 1854 for (index = start; index < end; index++) { 1855 struct page *page; 1856 1857 /* 1858 * Good, the fallocate(2) manpage permits EINTR: we may have 1859 * been interrupted because we are using up too much memory. 1860 */ 1861 if (signal_pending(current)) 1862 error = -EINTR; 1863 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 1864 error = -ENOMEM; 1865 else 1866 error = shmem_getpage(inode, index, &page, SGP_FALLOC, 1867 NULL); 1868 if (error) { 1869 /* Remove the !PageUptodate pages we added */ 1870 shmem_undo_range(inode, 1871 (loff_t)start << PAGE_CACHE_SHIFT, 1872 (loff_t)index << PAGE_CACHE_SHIFT, true); 1873 goto undone; 1874 } 1875 1876 /* 1877 * Inform shmem_writepage() how far we have reached. 1878 * No need for lock or barrier: we have the page lock. 1879 */ 1880 shmem_falloc.next++; 1881 if (!PageUptodate(page)) 1882 shmem_falloc.nr_falloced++; 1883 1884 /* 1885 * If !PageUptodate, leave it that way so that freeable pages 1886 * can be recognized if we need to rollback on error later. 1887 * But set_page_dirty so that memory pressure will swap rather 1888 * than free the pages we are allocating (and SGP_CACHE pages 1889 * might still be clean: we now need to mark those dirty too). 1890 */ 1891 set_page_dirty(page); 1892 unlock_page(page); 1893 page_cache_release(page); 1894 cond_resched(); 1895 } 1896 1897 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 1898 i_size_write(inode, offset + len); 1899 inode->i_ctime = CURRENT_TIME; 1900undone: 1901 spin_lock(&inode->i_lock); 1902 inode->i_private = NULL; 1903 spin_unlock(&inode->i_lock); 1904out: 1905 mutex_unlock(&inode->i_mutex); 1906 return error; 1907} 1908 1909static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 1910{ 1911 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 1912 1913 buf->f_type = TMPFS_MAGIC; 1914 buf->f_bsize = PAGE_CACHE_SIZE; 1915 buf->f_namelen = NAME_MAX; 1916 if (sbinfo->max_blocks) { 1917 buf->f_blocks = sbinfo->max_blocks; 1918 buf->f_bavail = 1919 buf->f_bfree = sbinfo->max_blocks - 1920 percpu_counter_sum(&sbinfo->used_blocks); 1921 } 1922 if (sbinfo->max_inodes) { 1923 buf->f_files = sbinfo->max_inodes; 1924 buf->f_ffree = sbinfo->free_inodes; 1925 } 1926 /* else leave those fields 0 like simple_statfs */ 1927 return 0; 1928} 1929 1930/* 1931 * File creation. Allocate an inode, and we're done.. 
1932 */ 1933static int 1934shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 1935{ 1936 struct inode *inode; 1937 int error = -ENOSPC; 1938 1939 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 1940 if (inode) { 1941 error = security_inode_init_security(inode, dir, 1942 &dentry->d_name, 1943 shmem_initxattrs, NULL); 1944 if (error) { 1945 if (error != -EOPNOTSUPP) { 1946 iput(inode); 1947 return error; 1948 } 1949 } 1950#ifdef CONFIG_TMPFS_POSIX_ACL 1951 error = generic_acl_init(inode, dir); 1952 if (error) { 1953 iput(inode); 1954 return error; 1955 } 1956#else 1957 error = 0; 1958#endif 1959 dir->i_size += BOGO_DIRENT_SIZE; 1960 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 1961 d_instantiate(dentry, inode); 1962 dget(dentry); /* Extra count - pin the dentry in core */ 1963 } 1964 return error; 1965} 1966 1967static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1968{ 1969 int error; 1970 1971 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 1972 return error; 1973 inc_nlink(dir); 1974 return 0; 1975} 1976 1977static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 1978 bool excl) 1979{ 1980 return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 1981} 1982 1983/* 1984 * Link a file.. 1985 */ 1986static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 1987{ 1988 struct inode *inode = old_dentry->d_inode; 1989 int ret; 1990 1991 /* 1992 * No ordinary (disk based) filesystem counts links as inodes; 1993 * but each new link needs a new dentry, pinning lowmem, and 1994 * tmpfs dentries cannot be pruned until they are unlinked. 1995 */ 1996 ret = shmem_reserve_inode(inode->i_sb); 1997 if (ret) 1998 goto out; 1999 2000 dir->i_size += BOGO_DIRENT_SIZE; 2001 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2002 inc_nlink(inode); 2003 ihold(inode); /* New dentry reference */ 2004 dget(dentry); /* Extra pinning count for the created dentry */ 2005 d_instantiate(dentry, inode); 2006out: 2007 return ret; 2008} 2009 2010static int shmem_unlink(struct inode *dir, struct dentry *dentry) 2011{ 2012 struct inode *inode = dentry->d_inode; 2013 2014 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 2015 shmem_free_inode(inode->i_sb); 2016 2017 dir->i_size -= BOGO_DIRENT_SIZE; 2018 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2019 drop_nlink(inode); 2020 dput(dentry); /* Undo the count from "create" - this does all the work */ 2021 return 0; 2022} 2023 2024static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 2025{ 2026 if (!simple_empty(dentry)) 2027 return -ENOTEMPTY; 2028 2029 drop_nlink(dentry->d_inode); 2030 drop_nlink(dir); 2031 return shmem_unlink(dir, dentry); 2032} 2033 2034/* 2035 * The VFS layer already does all the dentry stuff for rename, 2036 * we just have to decrement the usage count for the target if 2037 * it exists so that the VFS layer correctly free's it when it 2038 * gets overwritten. 
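 *
 * The subtle part is directory link counts: when a directory is renamed
 * over an existing empty directory, the overwritten target is unlinked
 * below and the old parent gives up its ".." reference, while the new
 * parent keeps the one the target had already contributed.  Illustrative
 * example (made-up paths) on a tmpfs mount:
 *
 *	rename("/mnt/a/dir1", "/mnt/b/dir2");
 *
 * drops i_nlink of /mnt/a and leaves i_nlink of /mnt/b unchanged.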
2039 */ 2040static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 2041{ 2042 struct inode *inode = old_dentry->d_inode; 2043 int they_are_dirs = S_ISDIR(inode->i_mode); 2044 2045 if (!simple_empty(new_dentry)) 2046 return -ENOTEMPTY; 2047 2048 if (new_dentry->d_inode) { 2049 (void) shmem_unlink(new_dir, new_dentry); 2050 if (they_are_dirs) 2051 drop_nlink(old_dir); 2052 } else if (they_are_dirs) { 2053 drop_nlink(old_dir); 2054 inc_nlink(new_dir); 2055 } 2056 2057 old_dir->i_size -= BOGO_DIRENT_SIZE; 2058 new_dir->i_size += BOGO_DIRENT_SIZE; 2059 old_dir->i_ctime = old_dir->i_mtime = 2060 new_dir->i_ctime = new_dir->i_mtime = 2061 inode->i_ctime = CURRENT_TIME; 2062 return 0; 2063} 2064 2065static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 2066{ 2067 int error; 2068 int len; 2069 struct inode *inode; 2070 struct page *page; 2071 char *kaddr; 2072 struct shmem_inode_info *info; 2073 2074 len = strlen(symname) + 1; 2075 if (len > PAGE_CACHE_SIZE) 2076 return -ENAMETOOLONG; 2077 2078 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 2079 if (!inode) 2080 return -ENOSPC; 2081 2082 error = security_inode_init_security(inode, dir, &dentry->d_name, 2083 shmem_initxattrs, NULL); 2084 if (error) { 2085 if (error != -EOPNOTSUPP) { 2086 iput(inode); 2087 return error; 2088 } 2089 error = 0; 2090 } 2091 2092 info = SHMEM_I(inode); 2093 inode->i_size = len-1; 2094 if (len <= SHORT_SYMLINK_LEN) { 2095 info->symlink = kmemdup(symname, len, GFP_KERNEL); 2096 if (!info->symlink) { 2097 iput(inode); 2098 return -ENOMEM; 2099 } 2100 inode->i_op = &shmem_short_symlink_operations; 2101 } else { 2102 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); 2103 if (error) { 2104 iput(inode); 2105 return error; 2106 } 2107 inode->i_mapping->a_ops = &shmem_aops; 2108 inode->i_op = &shmem_symlink_inode_operations; 2109 kaddr = kmap_atomic(page); 2110 memcpy(kaddr, symname, len); 2111 kunmap_atomic(kaddr); 2112 SetPageUptodate(page); 2113 set_page_dirty(page); 2114 unlock_page(page); 2115 page_cache_release(page); 2116 } 2117 dir->i_size += BOGO_DIRENT_SIZE; 2118 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2119 d_instantiate(dentry, inode); 2120 dget(dentry); 2121 return 0; 2122} 2123 2124static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd) 2125{ 2126 nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink); 2127 return NULL; 2128} 2129 2130static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) 2131{ 2132 struct page *page = NULL; 2133 int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); 2134 nd_set_link(nd, error ? ERR_PTR(error) : kmap(page)); 2135 if (page) 2136 unlock_page(page); 2137 return page; 2138} 2139 2140static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 2141{ 2142 if (!IS_ERR(nd_get_link(nd))) { 2143 struct page *page = cookie; 2144 kunmap(page); 2145 mark_page_accessed(page); 2146 page_cache_release(page); 2147 } 2148} 2149 2150#ifdef CONFIG_TMPFS_XATTR 2151/* 2152 * Superblocks without xattr inode operations may get some security.* xattr 2153 * support from the LSM "for free". As soon as we have any other xattrs 2154 * like ACLs, we also need to implement the security.* handlers at 2155 * filesystem level, though. 2156 */ 2157 2158/* 2159 * Callback for security_inode_init_security() for acquiring xattrs. 
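 *
 * The LSM hands over a NULL-name-terminated array of attributes without
 * the "security." prefix; with SELinux, for instance, the array would
 * look roughly like (illustrative values)
 *
 *	{ .name = "selinux", .value = context, .value_len = len },
 *	{ .name = NULL }
 *
 * and shmem_initxattrs() stores it as "security.selinux" on the new
 * inode, so the label is in place before the inode becomes visible.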
2160 */ 2161static int shmem_initxattrs(struct inode *inode, 2162 const struct xattr *xattr_array, 2163 void *fs_info) 2164{ 2165 struct shmem_inode_info *info = SHMEM_I(inode); 2166 const struct xattr *xattr; 2167 struct simple_xattr *new_xattr; 2168 size_t len; 2169 2170 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 2171 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 2172 if (!new_xattr) 2173 return -ENOMEM; 2174 2175 len = strlen(xattr->name) + 1; 2176 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 2177 GFP_KERNEL); 2178 if (!new_xattr->name) { 2179 kfree(new_xattr); 2180 return -ENOMEM; 2181 } 2182 2183 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 2184 XATTR_SECURITY_PREFIX_LEN); 2185 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 2186 xattr->name, len); 2187 2188 simple_xattr_list_add(&info->xattrs, new_xattr); 2189 } 2190 2191 return 0; 2192} 2193 2194static const struct xattr_handler *shmem_xattr_handlers[] = { 2195#ifdef CONFIG_TMPFS_POSIX_ACL 2196 &generic_acl_access_handler, 2197 &generic_acl_default_handler, 2198#endif 2199 NULL 2200}; 2201 2202static int shmem_xattr_validate(const char *name) 2203{ 2204 struct { const char *prefix; size_t len; } arr[] = { 2205 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, 2206 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } 2207 }; 2208 int i; 2209 2210 for (i = 0; i < ARRAY_SIZE(arr); i++) { 2211 size_t preflen = arr[i].len; 2212 if (strncmp(name, arr[i].prefix, preflen) == 0) { 2213 if (!name[preflen]) 2214 return -EINVAL; 2215 return 0; 2216 } 2217 } 2218 return -EOPNOTSUPP; 2219} 2220 2221static ssize_t shmem_getxattr(struct dentry *dentry, const char *name, 2222 void *buffer, size_t size) 2223{ 2224 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2225 int err; 2226 2227 /* 2228 * If this is a request for a synthetic attribute in the system.* 2229 * namespace use the generic infrastructure to resolve a handler 2230 * for it via sb->s_xattr. 2231 */ 2232 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2233 return generic_getxattr(dentry, name, buffer, size); 2234 2235 err = shmem_xattr_validate(name); 2236 if (err) 2237 return err; 2238 2239 return simple_xattr_get(&info->xattrs, name, buffer, size); 2240} 2241 2242static int shmem_setxattr(struct dentry *dentry, const char *name, 2243 const void *value, size_t size, int flags) 2244{ 2245 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2246 int err; 2247 2248 /* 2249 * If this is a request for a synthetic attribute in the system.* 2250 * namespace use the generic infrastructure to resolve a handler 2251 * for it via sb->s_xattr. 2252 */ 2253 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2254 return generic_setxattr(dentry, name, value, size, flags); 2255 2256 err = shmem_xattr_validate(name); 2257 if (err) 2258 return err; 2259 2260 return simple_xattr_set(&info->xattrs, name, value, size, flags); 2261} 2262 2263static int shmem_removexattr(struct dentry *dentry, const char *name) 2264{ 2265 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2266 int err; 2267 2268 /* 2269 * If this is a request for a synthetic attribute in the system.* 2270 * namespace use the generic infrastructure to resolve a handler 2271 * for it via sb->s_xattr. 
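	 *
	 * Concretely (illustrative commands, not part of this file):
	 *
	 *	setfattr -x trusted.foo /dev/shm/f        handled right here
	 *	setfattr -x system.posix_acl_access f     routed via sb->s_xattr
	 *	setfattr -x user.foo /dev/shm/f           rejected with EOPNOTSUPP
	 *
	 * since shmem_xattr_validate() admits only the security.* and
	 * trusted.* prefixes; the same split applies to shmem_getxattr()
	 * and shmem_setxattr() above.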
2272 */ 2273 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2274 return generic_removexattr(dentry, name); 2275 2276 err = shmem_xattr_validate(name); 2277 if (err) 2278 return err; 2279 2280 return simple_xattr_remove(&info->xattrs, name); 2281} 2282 2283static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 2284{ 2285 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2286 return simple_xattr_list(&info->xattrs, buffer, size); 2287} 2288#endif /* CONFIG_TMPFS_XATTR */ 2289 2290static const struct inode_operations shmem_short_symlink_operations = { 2291 .readlink = generic_readlink, 2292 .follow_link = shmem_follow_short_symlink, 2293#ifdef CONFIG_TMPFS_XATTR 2294 .setxattr = shmem_setxattr, 2295 .getxattr = shmem_getxattr, 2296 .listxattr = shmem_listxattr, 2297 .removexattr = shmem_removexattr, 2298#endif 2299}; 2300 2301static const struct inode_operations shmem_symlink_inode_operations = { 2302 .readlink = generic_readlink, 2303 .follow_link = shmem_follow_link, 2304 .put_link = shmem_put_link, 2305#ifdef CONFIG_TMPFS_XATTR 2306 .setxattr = shmem_setxattr, 2307 .getxattr = shmem_getxattr, 2308 .listxattr = shmem_listxattr, 2309 .removexattr = shmem_removexattr, 2310#endif 2311}; 2312 2313static struct dentry *shmem_get_parent(struct dentry *child) 2314{ 2315 return ERR_PTR(-ESTALE); 2316} 2317 2318static int shmem_match(struct inode *ino, void *vfh) 2319{ 2320 __u32 *fh = vfh; 2321 __u64 inum = fh[2]; 2322 inum = (inum << 32) | fh[1]; 2323 return ino->i_ino == inum && fh[0] == ino->i_generation; 2324} 2325 2326static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 2327 struct fid *fid, int fh_len, int fh_type) 2328{ 2329 struct inode *inode; 2330 struct dentry *dentry = NULL; 2331 u64 inum; 2332 2333 if (fh_len < 3) 2334 return NULL; 2335 2336 inum = fid->raw[2]; 2337 inum = (inum << 32) | fid->raw[1]; 2338 2339 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 2340 shmem_match, fid->raw); 2341 if (inode) { 2342 dentry = d_find_alias(inode); 2343 iput(inode); 2344 } 2345 2346 return dentry; 2347} 2348 2349static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 2350 struct inode *parent) 2351{ 2352 if (*len < 3) { 2353 *len = 3; 2354 return FILEID_INVALID; 2355 } 2356 2357 if (inode_unhashed(inode)) { 2358 /* Unfortunately insert_inode_hash is not idempotent, 2359 * so as we hash inodes here rather than at creation 2360 * time, we need a lock to ensure we only try 2361 * to do it once 2362 */ 2363 static DEFINE_SPINLOCK(lock); 2364 spin_lock(&lock); 2365 if (inode_unhashed(inode)) 2366 __insert_inode_hash(inode, 2367 inode->i_ino + inode->i_generation); 2368 spin_unlock(&lock); 2369 } 2370 2371 fh[0] = inode->i_generation; 2372 fh[1] = inode->i_ino; 2373 fh[2] = ((__u64)inode->i_ino) >> 32; 2374 2375 *len = 3; 2376 return 1; 2377} 2378 2379static const struct export_operations shmem_export_ops = { 2380 .get_parent = shmem_get_parent, 2381 .encode_fh = shmem_encode_fh, 2382 .fh_to_dentry = shmem_fh_to_dentry, 2383}; 2384 2385static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 2386 bool remount) 2387{ 2388 char *this_char, *value, *rest; 2389 struct mempolicy *mpol = NULL; 2390 uid_t uid; 2391 gid_t gid; 2392 2393 while (options != NULL) { 2394 this_char = options; 2395 for (;;) { 2396 /* 2397 * NUL-terminate this option: unfortunately, 2398 * mount options form a comma-separated list, 2399 * but mpol's nodelist may also contain commas. 
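			 *
			 * For example (made-up option string), given
			 *
			 *	size=512m,mpol=interleave:0,2,nr_inodes=4096
			 *
			 * the comma after "interleave:0" is part of the
			 * mpol nodelist "0,2", not an option separator;
			 * that is why a comma is only treated as a
			 * separator below when the character following
			 * it is not a digit.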
2400 */ 2401 options = strchr(options, ','); 2402 if (options == NULL) 2403 break; 2404 options++; 2405 if (!isdigit(*options)) { 2406 options[-1] = '\0'; 2407 break; 2408 } 2409 } 2410 if (!*this_char) 2411 continue; 2412 if ((value = strchr(this_char,'=')) != NULL) { 2413 *value++ = 0; 2414 } else { 2415 printk(KERN_ERR 2416 "tmpfs: No value for mount option '%s'\n", 2417 this_char); 2418 goto error; 2419 } 2420 2421 if (!strcmp(this_char,"size")) { 2422 unsigned long long size; 2423 size = memparse(value,&rest); 2424 if (*rest == '%') { 2425 size <<= PAGE_SHIFT; 2426 size *= totalram_pages; 2427 do_div(size, 100); 2428 rest++; 2429 } 2430 if (*rest) 2431 goto bad_val; 2432 sbinfo->max_blocks = 2433 DIV_ROUND_UP(size, PAGE_CACHE_SIZE); 2434 } else if (!strcmp(this_char,"nr_blocks")) { 2435 sbinfo->max_blocks = memparse(value, &rest); 2436 if (*rest) 2437 goto bad_val; 2438 } else if (!strcmp(this_char,"nr_inodes")) { 2439 sbinfo->max_inodes = memparse(value, &rest); 2440 if (*rest) 2441 goto bad_val; 2442 } else if (!strcmp(this_char,"mode")) { 2443 if (remount) 2444 continue; 2445 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; 2446 if (*rest) 2447 goto bad_val; 2448 } else if (!strcmp(this_char,"uid")) { 2449 if (remount) 2450 continue; 2451 uid = simple_strtoul(value, &rest, 0); 2452 if (*rest) 2453 goto bad_val; 2454 sbinfo->uid = make_kuid(current_user_ns(), uid); 2455 if (!uid_valid(sbinfo->uid)) 2456 goto bad_val; 2457 } else if (!strcmp(this_char,"gid")) { 2458 if (remount) 2459 continue; 2460 gid = simple_strtoul(value, &rest, 0); 2461 if (*rest) 2462 goto bad_val; 2463 sbinfo->gid = make_kgid(current_user_ns(), gid); 2464 if (!gid_valid(sbinfo->gid)) 2465 goto bad_val; 2466 } else if (!strcmp(this_char,"mpol")) { 2467 mpol_put(mpol); 2468 mpol = NULL; 2469 if (mpol_parse_str(value, &mpol)) 2470 goto bad_val; 2471 } else { 2472 printk(KERN_ERR "tmpfs: Bad mount option %s\n", 2473 this_char); 2474 goto error; 2475 } 2476 } 2477 sbinfo->mpol = mpol; 2478 return 0; 2479 2480bad_val: 2481 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", 2482 value, this_char); 2483error: 2484 mpol_put(mpol); 2485 return 1; 2486 2487} 2488 2489static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) 2490{ 2491 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2492 struct shmem_sb_info config = *sbinfo; 2493 unsigned long inodes; 2494 int error = -EINVAL; 2495 2496 config.mpol = NULL; 2497 if (shmem_parse_options(data, &config, true)) 2498 return error; 2499 2500 spin_lock(&sbinfo->stat_lock); 2501 inodes = sbinfo->max_inodes - sbinfo->free_inodes; 2502 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 2503 goto out; 2504 if (config.max_inodes < inodes) 2505 goto out; 2506 /* 2507 * Those tests disallow limited->unlimited while any are in use; 2508 * but we must separately disallow unlimited->limited, because 2509 * in that case we have no record of how much is already in use. 2510 */ 2511 if (config.max_blocks && !sbinfo->max_blocks) 2512 goto out; 2513 if (config.max_inodes && !sbinfo->max_inodes) 2514 goto out; 2515 2516 error = 0; 2517 sbinfo->max_blocks = config.max_blocks; 2518 sbinfo->max_inodes = config.max_inodes; 2519 sbinfo->free_inodes = config.max_inodes - inodes; 2520 2521 /* 2522 * Preserve previous mempolicy unless mpol remount option was specified. 
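	 *
	 * e.g. (illustrative): "mount -o remount,size=2g /dev/shm" leaves an
	 * earlier mpol=interleave policy untouched, whereas
	 * "mount -o remount,mpol=bind:0 /dev/shm" drops the old policy and
	 * installs the newly parsed one.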
2523 */ 2524 if (config.mpol) { 2525 mpol_put(sbinfo->mpol); 2526 sbinfo->mpol = config.mpol; /* transfers initial ref */ 2527 } 2528out: 2529 spin_unlock(&sbinfo->stat_lock); 2530 return error; 2531} 2532 2533static int shmem_show_options(struct seq_file *seq, struct dentry *root) 2534{ 2535 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 2536 2537 if (sbinfo->max_blocks != shmem_default_max_blocks()) 2538 seq_printf(seq, ",size=%luk", 2539 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); 2540 if (sbinfo->max_inodes != shmem_default_max_inodes()) 2541 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 2542 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 2543 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 2544 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 2545 seq_printf(seq, ",uid=%u", 2546 from_kuid_munged(&init_user_ns, sbinfo->uid)); 2547 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 2548 seq_printf(seq, ",gid=%u", 2549 from_kgid_munged(&init_user_ns, sbinfo->gid)); 2550 shmem_show_mpol(seq, sbinfo->mpol); 2551 return 0; 2552} 2553#endif /* CONFIG_TMPFS */ 2554 2555static void shmem_put_super(struct super_block *sb) 2556{ 2557 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2558 2559 percpu_counter_destroy(&sbinfo->used_blocks); 2560 mpol_put(sbinfo->mpol); 2561 kfree(sbinfo); 2562 sb->s_fs_info = NULL; 2563} 2564 2565int shmem_fill_super(struct super_block *sb, void *data, int silent) 2566{ 2567 struct inode *inode; 2568 struct shmem_sb_info *sbinfo; 2569 int err = -ENOMEM; 2570 2571 /* Round up to L1_CACHE_BYTES to resist false sharing */ 2572 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 2573 L1_CACHE_BYTES), GFP_KERNEL); 2574 if (!sbinfo) 2575 return -ENOMEM; 2576 2577 sbinfo->mode = S_IRWXUGO | S_ISVTX; 2578 sbinfo->uid = current_fsuid(); 2579 sbinfo->gid = current_fsgid(); 2580 sb->s_fs_info = sbinfo; 2581 2582#ifdef CONFIG_TMPFS 2583 /* 2584 * Per default we only allow half of the physical ram per 2585 * tmpfs instance, limiting inodes to one per page of lowmem; 2586 * but the internal instance is left unlimited. 
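	 *
	 * So a bare "mount -t tmpfs tmpfs /mnt" (illustrative) is capped at
	 * half of RAM and one inode per page of lowmem, while an explicit
	 * "-o size=1g,nr_inodes=100k" overrides both; the kernel-internal
	 * MS_NOUSER mount that backs shmem_file_setup() skips the limits
	 * entirely.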
2587 */ 2588 if (!(sb->s_flags & MS_NOUSER)) { 2589 sbinfo->max_blocks = shmem_default_max_blocks(); 2590 sbinfo->max_inodes = shmem_default_max_inodes(); 2591 if (shmem_parse_options(data, sbinfo, false)) { 2592 err = -EINVAL; 2593 goto failed; 2594 } 2595 } 2596 sb->s_export_op = &shmem_export_ops; 2597 sb->s_flags |= MS_NOSEC; 2598#else 2599 sb->s_flags |= MS_NOUSER; 2600#endif 2601 2602 spin_lock_init(&sbinfo->stat_lock); 2603 if (percpu_counter_init(&sbinfo->used_blocks, 0)) 2604 goto failed; 2605 sbinfo->free_inodes = sbinfo->max_inodes; 2606 2607 sb->s_maxbytes = MAX_LFS_FILESIZE; 2608 sb->s_blocksize = PAGE_CACHE_SIZE; 2609 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 2610 sb->s_magic = TMPFS_MAGIC; 2611 sb->s_op = &shmem_ops; 2612 sb->s_time_gran = 1; 2613#ifdef CONFIG_TMPFS_XATTR 2614 sb->s_xattr = shmem_xattr_handlers; 2615#endif 2616#ifdef CONFIG_TMPFS_POSIX_ACL 2617 sb->s_flags |= MS_POSIXACL; 2618#endif 2619 2620 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 2621 if (!inode) 2622 goto failed; 2623 inode->i_uid = sbinfo->uid; 2624 inode->i_gid = sbinfo->gid; 2625 sb->s_root = d_make_root(inode); 2626 if (!sb->s_root) 2627 goto failed; 2628 return 0; 2629 2630failed: 2631 shmem_put_super(sb); 2632 return err; 2633} 2634 2635static struct kmem_cache *shmem_inode_cachep; 2636 2637static struct inode *shmem_alloc_inode(struct super_block *sb) 2638{ 2639 struct shmem_inode_info *info; 2640 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 2641 if (!info) 2642 return NULL; 2643 return &info->vfs_inode; 2644} 2645 2646static void shmem_destroy_callback(struct rcu_head *head) 2647{ 2648 struct inode *inode = container_of(head, struct inode, i_rcu); 2649 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 2650} 2651 2652static void shmem_destroy_inode(struct inode *inode) 2653{ 2654 if (S_ISREG(inode->i_mode)) 2655 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 2656 call_rcu(&inode->i_rcu, shmem_destroy_callback); 2657} 2658 2659static void shmem_init_inode(void *foo) 2660{ 2661 struct shmem_inode_info *info = foo; 2662 inode_init_once(&info->vfs_inode); 2663} 2664 2665static int shmem_init_inodecache(void) 2666{ 2667 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 2668 sizeof(struct shmem_inode_info), 2669 0, SLAB_PANIC, shmem_init_inode); 2670 return 0; 2671} 2672 2673static void shmem_destroy_inodecache(void) 2674{ 2675 kmem_cache_destroy(shmem_inode_cachep); 2676} 2677 2678static const struct address_space_operations shmem_aops = { 2679 .writepage = shmem_writepage, 2680 .set_page_dirty = __set_page_dirty_no_writeback, 2681#ifdef CONFIG_TMPFS 2682 .write_begin = shmem_write_begin, 2683 .write_end = shmem_write_end, 2684#endif 2685 .migratepage = migrate_page, 2686 .error_remove_page = generic_error_remove_page, 2687}; 2688 2689static const struct file_operations shmem_file_operations = { 2690 .mmap = shmem_mmap, 2691#ifdef CONFIG_TMPFS 2692 .llseek = shmem_file_llseek, 2693 .read = do_sync_read, 2694 .write = do_sync_write, 2695 .aio_read = shmem_file_aio_read, 2696 .aio_write = generic_file_aio_write, 2697 .fsync = noop_fsync, 2698 .splice_read = shmem_file_splice_read, 2699 .splice_write = generic_file_splice_write, 2700 .fallocate = shmem_fallocate, 2701#endif 2702}; 2703 2704static const struct inode_operations shmem_inode_operations = { 2705 .setattr = shmem_setattr, 2706#ifdef CONFIG_TMPFS_XATTR 2707 .setxattr = shmem_setxattr, 2708 .getxattr = shmem_getxattr, 2709 .listxattr = shmem_listxattr, 2710 .removexattr = 
shmem_removexattr, 2711#endif 2712}; 2713 2714static const struct inode_operations shmem_dir_inode_operations = { 2715#ifdef CONFIG_TMPFS 2716 .create = shmem_create, 2717 .lookup = simple_lookup, 2718 .link = shmem_link, 2719 .unlink = shmem_unlink, 2720 .symlink = shmem_symlink, 2721 .mkdir = shmem_mkdir, 2722 .rmdir = shmem_rmdir, 2723 .mknod = shmem_mknod, 2724 .rename = shmem_rename, 2725#endif 2726#ifdef CONFIG_TMPFS_XATTR 2727 .setxattr = shmem_setxattr, 2728 .getxattr = shmem_getxattr, 2729 .listxattr = shmem_listxattr, 2730 .removexattr = shmem_removexattr, 2731#endif 2732#ifdef CONFIG_TMPFS_POSIX_ACL 2733 .setattr = shmem_setattr, 2734#endif 2735}; 2736 2737static const struct inode_operations shmem_special_inode_operations = { 2738#ifdef CONFIG_TMPFS_XATTR 2739 .setxattr = shmem_setxattr, 2740 .getxattr = shmem_getxattr, 2741 .listxattr = shmem_listxattr, 2742 .removexattr = shmem_removexattr, 2743#endif 2744#ifdef CONFIG_TMPFS_POSIX_ACL 2745 .setattr = shmem_setattr, 2746#endif 2747}; 2748 2749static const struct super_operations shmem_ops = { 2750 .alloc_inode = shmem_alloc_inode, 2751 .destroy_inode = shmem_destroy_inode, 2752#ifdef CONFIG_TMPFS 2753 .statfs = shmem_statfs, 2754 .remount_fs = shmem_remount_fs, 2755 .show_options = shmem_show_options, 2756#endif 2757 .evict_inode = shmem_evict_inode, 2758 .drop_inode = generic_delete_inode, 2759 .put_super = shmem_put_super, 2760}; 2761 2762static const struct vm_operations_struct shmem_vm_ops = { 2763 .fault = shmem_fault, 2764#ifdef CONFIG_NUMA 2765 .set_policy = shmem_set_policy, 2766 .get_policy = shmem_get_policy, 2767#endif 2768 .remap_pages = generic_file_remap_pages, 2769}; 2770 2771static struct dentry *shmem_mount(struct file_system_type *fs_type, 2772 int flags, const char *dev_name, void *data) 2773{ 2774 return mount_nodev(fs_type, flags, data, shmem_fill_super); 2775} 2776 2777static struct file_system_type shmem_fs_type = { 2778 .owner = THIS_MODULE, 2779 .name = "tmpfs", 2780 .mount = shmem_mount, 2781 .kill_sb = kill_litter_super, 2782 .fs_flags = FS_USERNS_MOUNT, 2783}; 2784 2785int __init shmem_init(void) 2786{ 2787 int error; 2788 2789 error = bdi_init(&shmem_backing_dev_info); 2790 if (error) 2791 goto out4; 2792 2793 error = shmem_init_inodecache(); 2794 if (error) 2795 goto out3; 2796 2797 error = register_filesystem(&shmem_fs_type); 2798 if (error) { 2799 printk(KERN_ERR "Could not register tmpfs\n"); 2800 goto out2; 2801 } 2802 2803 shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER, 2804 shmem_fs_type.name, NULL); 2805 if (IS_ERR(shm_mnt)) { 2806 error = PTR_ERR(shm_mnt); 2807 printk(KERN_ERR "Could not kern_mount tmpfs\n"); 2808 goto out1; 2809 } 2810 return 0; 2811 2812out1: 2813 unregister_filesystem(&shmem_fs_type); 2814out2: 2815 shmem_destroy_inodecache(); 2816out3: 2817 bdi_destroy(&shmem_backing_dev_info); 2818out4: 2819 shm_mnt = ERR_PTR(error); 2820 return error; 2821} 2822 2823#else /* !CONFIG_SHMEM */ 2824 2825/* 2826 * tiny-shmem: simple shmemfs and tmpfs using ramfs code 2827 * 2828 * This is intended for small system where the benefits of the full 2829 * shmem code (swap-backed and resource-limited) are outweighed by 2830 * their complexity. On systems without swap this code should be 2831 * effectively equivalent, but much lighter weight. 
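 *
 * The stubs and #defines below keep the rest of the kernel free of
 * CONFIG_SHMEM conditionals: shmem_unuse(), shmem_lock() and
 * shmem_unlock_mapping() become no-ops, shmem_truncate_range() falls
 * back to truncate_inode_pages_range(), and tmpfs mounts are served
 * directly by ramfs.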
2832 */ 2833 2834static struct file_system_type shmem_fs_type = { 2835 .name = "tmpfs", 2836 .mount = ramfs_mount, 2837 .kill_sb = kill_litter_super, 2838 .fs_flags = FS_USERNS_MOUNT, 2839}; 2840 2841int __init shmem_init(void) 2842{ 2843 BUG_ON(register_filesystem(&shmem_fs_type) != 0); 2844 2845 shm_mnt = kern_mount(&shmem_fs_type); 2846 BUG_ON(IS_ERR(shm_mnt)); 2847 2848 return 0; 2849} 2850 2851int shmem_unuse(swp_entry_t swap, struct page *page) 2852{ 2853 return 0; 2854} 2855 2856int shmem_lock(struct file *file, int lock, struct user_struct *user) 2857{ 2858 return 0; 2859} 2860 2861void shmem_unlock_mapping(struct address_space *mapping) 2862{ 2863} 2864 2865void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 2866{ 2867 truncate_inode_pages_range(inode->i_mapping, lstart, lend); 2868} 2869EXPORT_SYMBOL_GPL(shmem_truncate_range); 2870 2871#define shmem_vm_ops generic_file_vm_ops 2872#define shmem_file_operations ramfs_file_operations 2873#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 2874#define shmem_acct_size(flags, size) 0 2875#define shmem_unacct_size(flags, size) do {} while (0) 2876 2877#endif /* CONFIG_SHMEM */ 2878 2879/* common code */ 2880 2881static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen) 2882{ 2883 return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)", 2884 dentry->d_name.name); 2885} 2886 2887static struct dentry_operations anon_ops = { 2888 .d_dname = shmem_dname 2889}; 2890 2891/** 2892 * shmem_file_setup - get an unlinked file living in tmpfs 2893 * @name: name for dentry (to be seen in /proc/<pid>/maps 2894 * @size: size to be set for the file 2895 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 2896 */ 2897struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) 2898{ 2899 struct file *res; 2900 struct inode *inode; 2901 struct path path; 2902 struct super_block *sb; 2903 struct qstr this; 2904 2905 if (IS_ERR(shm_mnt)) 2906 return ERR_CAST(shm_mnt); 2907 2908 if (size < 0 || size > MAX_LFS_FILESIZE) 2909 return ERR_PTR(-EINVAL); 2910 2911 if (shmem_acct_size(flags, size)) 2912 return ERR_PTR(-ENOMEM); 2913 2914 res = ERR_PTR(-ENOMEM); 2915 this.name = name; 2916 this.len = strlen(name); 2917 this.hash = 0; /* will go */ 2918 sb = shm_mnt->mnt_sb; 2919 path.dentry = d_alloc_pseudo(sb, &this); 2920 if (!path.dentry) 2921 goto put_memory; 2922 d_set_d_op(path.dentry, &anon_ops); 2923 path.mnt = mntget(shm_mnt); 2924 2925 res = ERR_PTR(-ENOSPC); 2926 inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); 2927 if (!inode) 2928 goto put_dentry; 2929 2930 d_instantiate(path.dentry, inode); 2931 inode->i_size = size; 2932 clear_nlink(inode); /* It is unlinked */ 2933 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 2934 if (IS_ERR(res)) 2935 goto put_dentry; 2936 2937 res = alloc_file(&path, FMODE_WRITE | FMODE_READ, 2938 &shmem_file_operations); 2939 if (IS_ERR(res)) 2940 goto put_dentry; 2941 2942 return res; 2943 2944put_dentry: 2945 path_put(&path); 2946put_memory: 2947 shmem_unacct_size(flags, size); 2948 return res; 2949} 2950EXPORT_SYMBOL_GPL(shmem_file_setup); 2951 2952/** 2953 * shmem_zero_setup - setup a shared anonymous mapping 2954 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff 2955 */ 2956int shmem_zero_setup(struct vm_area_struct *vma) 2957{ 2958 struct file *file; 2959 loff_t size = vma->vm_end - vma->vm_start; 2960 2961 file = shmem_file_setup("dev/zero", size, vma->vm_flags); 
2962 if (IS_ERR(file)) 2963 return PTR_ERR(file); 2964 2965 if (vma->vm_file) 2966 fput(vma->vm_file); 2967 vma->vm_file = file; 2968 vma->vm_ops = &shmem_vm_ops; 2969 return 0; 2970} 2971 2972/** 2973 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 2974 * @mapping: the page's address_space 2975 * @index: the page index 2976 * @gfp: the page allocator flags to use if allocating 2977 * 2978 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 2979 * with any new page allocations done using the specified allocation flags. 2980 * But read_cache_page_gfp() uses the ->readpage() method: which does not 2981 * suit tmpfs, since it may have pages in swapcache, and needs to find those 2982 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 2983 * 2984 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 2985 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 2986 */ 2987struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 2988 pgoff_t index, gfp_t gfp) 2989{ 2990#ifdef CONFIG_SHMEM 2991 struct inode *inode = mapping->host; 2992 struct page *page; 2993 int error; 2994 2995 BUG_ON(mapping->a_ops != &shmem_aops); 2996 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL); 2997 if (error) 2998 page = ERR_PTR(error); 2999 else 3000 unlock_page(page); 3001 return page; 3002#else 3003 /* 3004 * The tiny !SHMEM case uses ramfs without swap 3005 */ 3006 return read_cache_page_gfp(mapping, index, gfp); 3007#endif 3008} 3009EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 3010
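/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * that keeps its backing store in shmem, like the gem/ttm users mentioned
 * above, reads pages in roughly as follows; "file" here is one returned by
 * shmem_file_setup() and "index" is a made-up page offset:
 *
 *	gfp_t gfp = mapping_gfp_mask(file->f_mapping) |
 *		    __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(file->f_mapping,
 *							index, gfp);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 * The page comes back unlocked with a reference held, so the caller drops
 * it with page_cache_release() when finished.
 */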