shmem.c revision 8e205f779d1443a94b5ae81aa359cb535dd3021e
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/aio.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate !Uptodate page */
	SGP_FALLOC,	/* like SGP_WRITE, but make existing page Uptodate */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
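
/*
 * Illustrative note (not part of the original source): with a 4KB page
 * size, VM_ACCT(10000) rounds up to 3 pages.  A SysV/anonymous object of
 * size 10000 is therefore charged 3 pages up front by shmem_acct_size(),
 * while a tmpfs file of the same size (VM_NORESERVE) is charged one page
 * at a time by shmem_acct_block() only as pages are actually instantiated,
 * so a sparse file never pays for its holes.
 */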

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}
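
/*
 * Worked example for the calculation above (illustrative, not from the
 * original source): if 8 pages were accounted in info->alloced, 2 now
 * live in swap (info->swapped) and only 5 remain in pagecache (nrpages),
 * then freed = 8 - 2 - 5 = 1: one clean hole page was reclaimed behind
 * our back, and its block and memory accounting are given back here.
 */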

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (!pslot)
		return -ENOENT;
	item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	radix_tree_replace_slot(pslot, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, gfp_t gfp, void *expected)
{
	int error;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (!expected)
		error = radix_tree_insert(&mapping->page_tree, index, page);
	else
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	if (!error) {
		mapping->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		__inc_zone_page_state(page, NR_SHMEM);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_cache_release(page);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__dec_zone_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}
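
/*
 * Background note (illustrative, not from the original source): a page
 * swapped out of tmpfs is not simply deleted from the radix tree; its slot
 * is kept and filled with the "exceptional" entry swp_to_radix_entry(swap),
 * which radix_to_swp_entry() decodes back into a swp_entry_t.  That is why
 * lookups throughout this file must test radix_tree_exceptional_entry()
 * before treating a returned pointer as a struct page.
 */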

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ, NULL);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			if (index == start || unfalloc)
				break;
			index = start;
			continue;
		}
		if ((index == start || unfalloc) && indices[0] >= end) {
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			lock_page(page);
			if (!unfalloc || !PageUptodate(page)) {
				if (page->mapping == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}

	spin_lock(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);
}
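
/*
 * Worked example of the partial-page handling above (illustrative,
 * assuming 4KB pages): punching lstart=1000, lend=4999 gives start=1,
 * end=1, partial_start=1000 and partial_end=904.  No whole page lies in
 * [start,end), so nothing is removed; instead bytes [1000,4096) of page 0
 * and bytes [0,904) of page 1 are zeroed in place and marked dirty.
 */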

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
	if (attr->ia_valid & ATTR_MODE)
		error = posix_acl_chmod(inode, inode->i_mode);
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	} else
		kfree(info->symlink);

	simple_xattrs_free(&info->xattrs);
	WARN_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page **pagep)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	void *radswap;
	pgoff_t index;
	gfp_t gfp;
	int error = 0;

	radswap = swp_to_radix_entry(swap);
	index = radix_tree_locate_item(&mapping->page_tree, radswap);
	if (index == -1)
		return 0;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	gfp = mapping_gfp_mask(mapping);
	if (shmem_should_replace_page(*pagep, gfp)) {
		mutex_unlock(&shmem_swaplist_mutex);
		error = shmem_replace_page(pagep, gfp, info, index);
		mutex_lock(&shmem_swaplist_mutex);
		/*
		 * We needed to drop mutex to make that restrictive page
		 * allocation, but the inode might have been freed while we
		 * dropped it: although a racing shmem_evict_inode() cannot
		 * complete without emptying the radix_tree, our page lock
		 * on this swapcache page is not enough to prevent that -
		 * free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from radix_tree whatever.
		 *
		 * We must not proceed to shmem_add_to_page_cache() if the
		 * inode has been freed, but of course we cannot rely on
		 * inode or mapping or info to check that. However, we can
		 * safely check if our swap entry is still in use (and here
		 * it can't have got reused for another page): if it's still
		 * in use, then the inode cannot have been freed yet, and we
		 * can safely proceed (if it's no longer in use, that tells
		 * nothing about the inode, but we don't need to unuse swap).
		 */
		if (!page_swapcount(*pagep))
			error = -ENOENT;
	}

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	if (!error)
		error = shmem_add_to_page_cache(*pagep, mapping, index,
						GFP_NOWAIT, radswap);
	if (error != -ENOMEM) {
		/*
		 * Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock page: if we raced, best clean up here.
		 */
		delete_from_swap_cache(*pagep);
		set_page_dirty(*pagep);
		if (!error) {
			spin_lock(&info->lock);
			info->swapped--;
			spin_unlock(&info->lock);
			swap_free(swap);
		}
		error = 1;	/* not an error, but entry was found */
	}
	return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error = 0;

	/*
	 * There's a faint possibility that swap page was replaced before
	 * caller locked it: caller will come back later with the right page.
	 */
	if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
		goto out;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 */
	error = mem_cgroup_charge_file(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/* No radix_tree_preload: swap entry keeps a place for page in tree */

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, &page);
		else
			list_del_init(&info->swaplist);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	struct address_space *mapping;
	struct inode *inode;
	swp_entry_t swap;
	pgoff_t index;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
	 * value into swapfile.c, the only way we can correctly account for a
	 * fallocated page arriving here is now to initialize it and write it.
	 *
	 * That's okay for a page already fallocated earlier, but if we have
	 * not yet completed the fallocation, then (a) we want to keep track
	 * of this page in case we have to undo it, and (b) it may not be a
	 * good idea to continue anyway, once we're pushing into swap.  So
	 * reactivate the page, and let shmem_fallocate() quit when too many.
	 */
	if (!PageUptodate(page)) {
		if (inode->i_private) {
			struct shmem_falloc *shmem_falloc;
			spin_lock(&inode->i_lock);
			shmem_falloc = inode->i_private;
			if (shmem_falloc &&
			    !shmem_falloc->waitq &&
			    index >= shmem_falloc->start &&
			    index < shmem_falloc->next)
				shmem_falloc->nr_unswapped++;
			else
				shmem_falloc = NULL;
			spin_unlock(&inode->i_lock);
			if (shmem_falloc)
				goto redirty;
		}
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now before the page is
	 * moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've incremented swapped, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under this mutex.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		swap_shmem_alloc(swap);
		shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

		spin_lock(&info->lock);
		info->swapped++;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		mutex_unlock(&shmem_swaplist_mutex);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	mutex_unlock(&shmem_swaplist_mutex);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
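
/*
 * Note on the redirty path above (illustrative, not from the original
 * source): returning AOP_WRITEPAGE_ACTIVATE tells vmscan that the page
 * was not written and should go back to the active list, and by
 * convention the page is left locked for the caller in that case; every
 * other exit unlocks the page itself.
 */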

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = swapin_readahead(swap, gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	/* Bias interleave by inode number to distribute better across nodes */
	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	page = alloc_page_vma(gfp, &pvma, 0);

	/* Drop reference taken by mpol_shared_policy_lookup() */
	mpol_cond_put(pvma.vm_policy);

	return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * When a page is moved from swapcache to shmem filecache (either by the
 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
 * shmem_unuse_inode()), it may have been read in earlier from swap, in
 * ignorance of the mapping it belongs to.  If that mapping has special
 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
 * we may need to copy to a suitable page before moving to filecache.
 *
 * In a future release, this may well be extended to respect cpuset and
 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
 * but for now it is a simple matter of zone.
 */
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
	return page_zonenum(page) > gfp_zone(gfp);
}
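
/*
 * Illustrative example (not from the original source): if a driver
 * restricts a mapping's gfp mask to __GFP_DMA32 allocations, gfp_zone()
 * yields ZONE_DMA32; a swapped-in page that happened to land in
 * ZONE_NORMAL or ZONE_HIGHMEM then has page_zonenum(page) > gfp_zone(gfp),
 * and shmem_replace_page() below copies it into a suitably placed page.
 */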

static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index)
{
	struct page *oldpage, *newpage;
	struct address_space *swap_mapping;
	pgoff_t swap_index;
	int error;

	oldpage = *pagep;
	swap_index = page_private(oldpage);
	swap_mapping = page_mapping(oldpage);

	/*
	 * We have arrived here because our zones are constrained, so don't
	 * limit chance of success by further cpuset and node constraints.
	 */
	gfp &= ~GFP_CONSTRAINT_MASK;
	newpage = shmem_alloc_page(gfp, info, index);
	if (!newpage)
		return -ENOMEM;

	page_cache_get(newpage);
	copy_highpage(newpage, oldpage);
	flush_dcache_page(newpage);

	__set_page_locked(newpage);
	SetPageUptodate(newpage);
	SetPageSwapBacked(newpage);
	set_page_private(newpage, swap_index);
	SetPageSwapCache(newpage);

	/*
	 * Our caller will very soon move newpage out of swapcache, but it's
	 * a nice clean interface for us to replace oldpage by newpage there.
	 */
	spin_lock_irq(&swap_mapping->tree_lock);
	error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
								   newpage);
	if (!error) {
		__inc_zone_page_state(newpage, NR_FILE_PAGES);
		__dec_zone_page_state(oldpage, NR_FILE_PAGES);
	}
	spin_unlock_irq(&swap_mapping->tree_lock);

	if (unlikely(error)) {
		/*
		 * Is this possible?  I think not, now that our callers check
		 * both PageSwapCache and page_private after getting page lock;
		 * but be defensive.  Reverse old to newpage for clear and free.
		 */
		oldpage = newpage;
	} else {
		mem_cgroup_replace_page_cache(oldpage, newpage);
		lru_cache_add_anon(newpage);
		*pagep = newpage;
	}

	ClearPageSwapCache(oldpage);
	set_page_private(oldpage, 0);

	unlock_page(oldpage);
	page_cache_release(oldpage);
	page_cache_release(oldpage);
	return error;
}

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo;
	struct page *page;
	swp_entry_t swap;
	int error;
	int once = 0;
	int alloced = 0;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	swap.val = 0;
	page = find_lock_entry(mapping, index);
	if (radix_tree_exceptional_entry(page)) {
		swap = radix_to_swp_entry(page);
		page = NULL;
	}

	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		goto failed;
	}

	if (page && sgp == SGP_WRITE)
		mark_page_accessed(page);

	/* fallocated page? */
	if (page && !PageUptodate(page)) {
		if (sgp != SGP_READ)
			goto clear;
		unlock_page(page);
		page_cache_release(page);
		page = NULL;
	}
	if (page || (sgp == SGP_READ && !swap.val)) {
		*pagep = page;
		return 0;
	}

	/*
	 * Fast cache lookup did not find it:
	 * bring it back from swap or allocate.
	 */
	info = SHMEM_I(inode);
	sbinfo = SHMEM_SB(inode->i_sb);

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				error = -ENOMEM;
				goto failed;
			}
		}

		/* We have to do this with page locked to prevent races */
		lock_page(page);
		if (!PageSwapCache(page) || page_private(page) != swap.val ||
		    !shmem_confirm_swap(mapping, index, swap)) {
			error = -EEXIST;	/* try again */
			goto unlock;
		}
		if (!PageUptodate(page)) {
			error = -EIO;
			goto failed;
		}
		wait_on_page_writeback(page);

		if (shmem_should_replace_page(page, gfp)) {
			error = shmem_replace_page(&page, gfp, info, index);
			if (error)
				goto failed;
		}

		error = mem_cgroup_charge_file(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
						gfp, swp_to_radix_entry(swap));
			/*
			 * We already confirmed swap under page lock, and make
			 * no memory allocation here, so usually no possibility
			 * of error; but free_swap_and_cache() only trylocks a
			 * page, so it is just possible that the entry has been
			 * truncated or holepunched since swap was confirmed.
			 * shmem_undo_range() will have done some of the
			 * unaccounting, now delete_from_swap_cache() will do
			 * the rest (including mem_cgroup_uncharge_swapcache).
			 * Reset swap.val? No, leave it so "failed" goes back to
			 * "repeat": reading a hole and writing should succeed.
			 */
			if (error)
				delete_from_swap_cache(page);
		}
		if (error)
			goto failed;

		spin_lock(&info->lock);
		info->swapped--;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);

		if (sgp == SGP_WRITE)
			mark_page_accessed(page);

		delete_from_swap_cache(page);
		set_page_dirty(page);
		swap_free(swap);

	} else {
		if (shmem_acct_block(info->flags)) {
			error = -ENOSPC;
			goto failed;
		}
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0) {
				error = -ENOSPC;
				goto unacct;
			}
			percpu_counter_inc(&sbinfo->used_blocks);
		}

		page = shmem_alloc_page(gfp, info, index);
		if (!page) {
			error = -ENOMEM;
			goto decused;
		}

		__SetPageSwapBacked(page);
		__set_page_locked(page);
		if (sgp == SGP_WRITE)
			init_page_accessed(page);

		error = mem_cgroup_charge_file(page, current->mm,
						gfp & GFP_RECLAIM_MASK);
		if (error)
			goto decused;
		error = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
		if (!error) {
			error = shmem_add_to_page_cache(page, mapping, index,
							gfp, NULL);
			radix_tree_preload_end();
		}
		if (error) {
			mem_cgroup_uncharge_cache_page(page);
			goto decused;
		}
		lru_cache_add_anon(page);

		spin_lock(&info->lock);
		info->alloced++;
		inode->i_blocks += BLOCKS_PER_PAGE;
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		alloced = true;

		/*
		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
		 */
		if (sgp == SGP_FALLOC)
			sgp = SGP_WRITE;
clear:
		/*
		 * Let SGP_WRITE caller clear ends if write does not fill page;
		 * but SGP_FALLOC on a page fallocated earlier must initialize
		 * it now, lest undo on failure cancel our earlier guarantee.
		 */
		if (sgp != SGP_WRITE) {
			clear_highpage(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);
	}

	/* Perhaps the file has been truncated since we checked */
	if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
	    ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
		error = -EINVAL;
		if (alloced)
			goto trunc;
		else
			goto failed;
	}
	*pagep = page;
	return 0;

	/*
	 * Error recovery.
	 */
trunc:
	info = SHMEM_I(inode);
	ClearPageDirty(page);
	delete_from_page_cache(page);
	spin_lock(&info->lock);
	info->alloced--;
	inode->i_blocks -= BLOCKS_PER_PAGE;
	spin_unlock(&info->lock);
decused:
	sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks)
		percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
	shmem_unacct_blocks(info->flags, 1);
failed:
	if (swap.val && error != -EINVAL &&
	    !shmem_confirm_swap(mapping, index, swap))
		error = -EEXIST;
unlock:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	if (error == -ENOSPC && !once++) {
		info = SHMEM_I(inode);
		spin_lock(&info->lock);
		shmem_recalc_inode(inode);
		spin_unlock(&info->lock);
		goto repeat;
	}
	if (error == -EEXIST)	/* from above or from radix_tree_insert */
		goto repeat;
	return error;
}
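
/*
 * Note on the error path above (illustrative, not from the original
 * source): the single -ENOSPC retry, gated by "once", gives
 * shmem_recalc_inode() one chance to discover blocks that the mm freed
 * behind our back (see its comment earlier), which may release just
 * enough quota for the "goto repeat" to succeed; -EEXIST simply means
 * we lost a race and the radix tree slot changed, so we retry as well.
 */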

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int error;
	int ret = VM_FAULT_LOCKED;

	/*
	 * Trinity finds that probing a hole which tmpfs is punching can
	 * prevent the hole-punch from ever completing: which in turn
	 * locks writers out with its hold on i_mutex.  So refrain from
	 * faulting pages into the hole while it's being punched.  Although
	 * shmem_undo_range() does remove the additions, it may be unable to
	 * keep up, as each new page needs its own unmap_mapping_range() call,
	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
	 *
	 * It does not matter if we sometimes reach this check just before the
	 * hole-punch begins, so that one fault then races with the punch:
	 * we just need to make racing faults a rare case.
	 *
	 * The implementation below would be much simpler if we just used a
	 * standard mutex or completion: but we cannot take i_mutex in fault,
	 * and bloating every shmem inode for this unlikely case would be sad.
	 */
	if (unlikely(inode->i_private)) {
		struct shmem_falloc *shmem_falloc;

		spin_lock(&inode->i_lock);
		shmem_falloc = inode->i_private;
		if (shmem_falloc &&
		    shmem_falloc->waitq &&
		    vmf->pgoff >= shmem_falloc->start &&
		    vmf->pgoff < shmem_falloc->next) {
			wait_queue_head_t *shmem_falloc_waitq;
			DEFINE_WAIT(shmem_fault_wait);

			ret = VM_FAULT_NOPAGE;
			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* It's polite to up mmap_sem if we can */
				up_read(&vma->vm_mm->mmap_sem);
				ret = VM_FAULT_RETRY;
			}

			shmem_falloc_waitq = shmem_falloc->waitq;
			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock(&inode->i_lock);
			schedule();

			/*
			 * shmem_falloc_waitq points into the shmem_fallocate()
			 * stack of the hole-punching task: shmem_falloc_waitq
			 * is usually invalid by the time we reach here, but
			 * finish_wait() does not dereference it in that case;
			 * though i_lock needed lest racing with wake_up_all().
			 */
			spin_lock(&inode->i_lock);
			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
			spin_unlock(&inode->i_lock);
			return ret;
		}
		spin_unlock(&inode->i_lock);
	}

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = file_inode(vma->vm_file);
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = file_inode(vma->vm_file);
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file_inode(file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		simple_xattrs_init(&info->xattrs);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

bool shmem_mapping(struct address_space *mapping)
{
	return mapping->backing_dev_info == &shmem_backing_dev_info;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;

#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	if (!PageUptodate(page)) {
		if (copied < PAGE_CACHE_SIZE) {
			unsigned from = pos & (PAGE_CACHE_SIZE - 1);
			zero_user_segments(page, 0, from,
					from + copied, PAGE_CACHE_SIZE);
		}
		SetPageUptodate(page);
	}
	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
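
/*
 * Worked example for shmem_write_end() above (illustrative, assuming 4KB
 * pages): if a write copies 50 bytes at pos=100 into a freshly allocated
 * !Uptodate page, from=100, so zero_user_segments() clears bytes [0,100)
 * and [150,4096), leaving no stale data visible before the page is marked
 * Uptodate.
 */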

static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;
	int error = 0;
	ssize_t retval = 0;
	loff_t *ppos = &iocb->ki_pos;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (error) {
			if (error == -EINVAL)
				error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 */
		ret = copy_page_to_iter(page, offset, nr, to);
		retval += ret;
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (!iov_iter_count(to))
			break;
		if (ret < nr) {
			error = -EFAULT;
			break;
		}
		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(file);
	return retval ? retval : error;
}

static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, spd.nr_pages_max);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(&spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

/*
 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
 */
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
				    pgoff_t index, pgoff_t end, int whence)
{
	struct page *page;
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	bool done = false;
	int i;

	pagevec_init(&pvec, 0);
	pvec.nr = 1;		/* start small: we may be there already */
	while (!done) {
		pvec.nr = find_get_entries(mapping, index,
					pvec.nr, pvec.pages, indices);
		if (!pvec.nr) {
			if (whence == SEEK_DATA)
				index = end;
			break;
		}
		for (i = 0; i < pvec.nr; i++, index++) {
			if (index < indices[i]) {
				if (whence == SEEK_HOLE) {
					done = true;
					break;
				}
				index = indices[i];
			}
			page = pvec.pages[i];
			if (page && !radix_tree_exceptional_entry(page)) {
				if (!PageUptodate(page))
					page = NULL;
			}
			if (index >= end ||
			    (page && whence == SEEK_DATA) ||
			    (!page && whence == SEEK_HOLE)) {
				done = true;
				break;
			}
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		pvec.nr = PAGEVEC_SIZE;
		cond_resched();
	}
	return index;
}

static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t start, end;
	loff_t new_offset;

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek_size(file, offset, whence,
					MAX_LFS_FILESIZE, i_size_read(inode));
	mutex_lock(&inode->i_mutex);
	/* We're holding i_mutex so we can access i_size directly */

	if (offset < 0)
		offset = -EINVAL;
	else if (offset >= inode->i_size)
		offset = -ENXIO;
	else {
		start = offset >> PAGE_CACHE_SHIFT;
		end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		new_offset = shmem_seek_hole_data(mapping, start, end, whence);
		new_offset <<= PAGE_CACHE_SHIFT;
		if (new_offset > offset) {
			if (new_offset < inode->i_size)
				offset = new_offset;
			else if (whence == SEEK_DATA)
				offset = -ENXIO;
			else
				offset = inode->i_size;
		}
	}

	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	mutex_unlock(&inode->i_mutex);
	return offset;
}
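
/*
 * Usage sketch from userspace (illustrative, not from the original
 * source): on a sparse tmpfs file with data only in its second 4KB page,
 *	lseek(fd, 0, SEEK_HOLE) returns 0;
 *	lseek(fd, 0, SEEK_DATA) returns 4096;
 * and SEEK_DATA beyond the last data returns -ENXIO, as above.  The
 * granularity is the page: a hole is any page not present and Uptodate.
 */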

static long shmem_fallocate(struct file *file, int mode, loff_t offset,
							 loff_t len)
{
	struct inode *inode = file_inode(file);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_falloc shmem_falloc;
	pgoff_t start, index, end;
	int error;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		struct address_space *mapping = file->f_mapping;
		loff_t unmap_start = round_up(offset, PAGE_SIZE);
		loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

		shmem_falloc.waitq = &shmem_falloc_waitq;
		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
		spin_lock(&inode->i_lock);
		inode->i_private = &shmem_falloc;
		spin_unlock(&inode->i_lock);

		if ((u64)unmap_end > (u64)unmap_start)
			unmap_mapping_range(mapping, unmap_start,
					    1 + unmap_end - unmap_start, 0);
		shmem_truncate_range(inode, offset, offset + len - 1);
		/* No need to unmap again: hole-punching leaves COWed pages */

		spin_lock(&inode->i_lock);
		inode->i_private = NULL;
		wake_up_all(&shmem_falloc_waitq);
		spin_unlock(&inode->i_lock);
		error = 0;
		goto out;
	}

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	start = offset >> PAGE_CACHE_SHIFT;
	end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	/* Try to avoid a swapstorm if len is impossible to satisfy */
	if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
		error = -ENOSPC;
		goto out;
	}

	shmem_falloc.waitq = NULL;
	shmem_falloc.start = start;
	shmem_falloc.next  = start;
	shmem_falloc.nr_falloced = 0;
	shmem_falloc.nr_unswapped = 0;
	spin_lock(&inode->i_lock);
	inode->i_private = &shmem_falloc;
	spin_unlock(&inode->i_lock);

	for (index = start; index < end; index++) {
		struct page *page;

		/*
		 * Good, the fallocate(2) manpage permits EINTR: we may have
		 * been interrupted because we are using up too much memory.
		 */
		if (signal_pending(current))
			error = -EINTR;
		else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
			error = -ENOMEM;
		else
			error = shmem_getpage(inode, index, &page, SGP_FALLOC,
									NULL);
		if (error) {
			/* Remove the !PageUptodate pages we added */
			shmem_undo_range(inode,
				(loff_t)start << PAGE_CACHE_SHIFT,
				(loff_t)index << PAGE_CACHE_SHIFT, true);
			goto undone;
		}

		/*
		 * Inform shmem_writepage() how far we have reached.
		 * No need for lock or barrier: we have the page lock.
		 */
		shmem_falloc.next++;
		if (!PageUptodate(page))
			shmem_falloc.nr_falloced++;

		/*
		 * If !PageUptodate, leave it that way so that freeable pages
		 * can be recognized if we need to rollback on error later.
		 * But set_page_dirty so that memory pressure will swap rather
		 * than free the pages we are allocating (and SGP_CACHE pages
		 * might still be clean: we now need to mark those dirty too).
		 */
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
		cond_resched();
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
undone:
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;
	spin_unlock(&inode->i_lock);
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}
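
/*
 * Usage sketch from userspace (illustrative, not from the original
 * source): a hole is punched with
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, len);
 * while plain fallocate(fd, 0, offset, len) preallocates pages and may
 * extend i_size; any other mode bits fail with -EOPNOTSUPP above.
 */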
1924 */ 1925static int 1926shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 1927{ 1928 struct inode *inode; 1929 int error = -ENOSPC; 1930 1931 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 1932 if (inode) { 1933 error = simple_acl_create(dir, inode); 1934 if (error) 1935 goto out_iput; 1936 error = security_inode_init_security(inode, dir, 1937 &dentry->d_name, 1938 shmem_initxattrs, NULL); 1939 if (error && error != -EOPNOTSUPP) 1940 goto out_iput; 1941 1942 error = 0; 1943 dir->i_size += BOGO_DIRENT_SIZE; 1944 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 1945 d_instantiate(dentry, inode); 1946 dget(dentry); /* Extra count - pin the dentry in core */ 1947 } 1948 return error; 1949out_iput: 1950 iput(inode); 1951 return error; 1952} 1953 1954static int 1955shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 1956{ 1957 struct inode *inode; 1958 int error = -ENOSPC; 1959 1960 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 1961 if (inode) { 1962 error = security_inode_init_security(inode, dir, 1963 NULL, 1964 shmem_initxattrs, NULL); 1965 if (error && error != -EOPNOTSUPP) 1966 goto out_iput; 1967 error = simple_acl_create(dir, inode); 1968 if (error) 1969 goto out_iput; 1970 d_tmpfile(dentry, inode); 1971 } 1972 return error; 1973out_iput: 1974 iput(inode); 1975 return error; 1976} 1977 1978static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 1979{ 1980 int error; 1981 1982 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 1983 return error; 1984 inc_nlink(dir); 1985 return 0; 1986} 1987 1988static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 1989 bool excl) 1990{ 1991 return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 1992} 1993 1994/* 1995 * Link a file.. 1996 */ 1997static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 1998{ 1999 struct inode *inode = old_dentry->d_inode; 2000 int ret; 2001 2002 /* 2003 * No ordinary (disk based) filesystem counts links as inodes; 2004 * but each new link needs a new dentry, pinning lowmem, and 2005 * tmpfs dentries cannot be pruned until they are unlinked. 2006 */ 2007 ret = shmem_reserve_inode(inode->i_sb); 2008 if (ret) 2009 goto out; 2010 2011 dir->i_size += BOGO_DIRENT_SIZE; 2012 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2013 inc_nlink(inode); 2014 ihold(inode); /* New dentry reference */ 2015 dget(dentry); /* Extra pinning count for the created dentry */ 2016 d_instantiate(dentry, inode); 2017out: 2018 return ret; 2019} 2020 2021static int shmem_unlink(struct inode *dir, struct dentry *dentry) 2022{ 2023 struct inode *inode = dentry->d_inode; 2024 2025 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 2026 shmem_free_inode(inode->i_sb); 2027 2028 dir->i_size -= BOGO_DIRENT_SIZE; 2029 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2030 drop_nlink(inode); 2031 dput(dentry); /* Undo the count from "create" - this does all the work */ 2032 return 0; 2033} 2034 2035static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 2036{ 2037 if (!simple_empty(dentry)) 2038 return -ENOTEMPTY; 2039 2040 drop_nlink(dentry->d_inode); 2041 drop_nlink(dir); 2042 return shmem_unlink(dir, dentry); 2043} 2044 2045/* 2046 * The VFS layer already does all the dentry stuff for rename, 2047 * we just have to decrement the usage count for the target if 2048 * it exists so that the VFS layer correctly frees it when it 2049 * gets overwritten.
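 *
 * Editor's aside: the BOGO_DIRENT_SIZE bookkeeping performed by the
 * routines above is directly visible from userspace; a sketch (not part
 * of this file, the path being an assumption):
 */
#if 0 /* userspace example, not kernel code */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;

	mkdir("/dev/shm/dir-demo", 0700);
	stat("/dev/shm/dir-demo", &st);
	/* a fresh tmpfs directory starts at 2 * BOGO_DIRENT_SIZE,
	 * pretending "." and ".." take 20 bytes each */
	printf("empty: %lld\n", (long long)st.st_size);		/* 40 */

	/* shmem_mknod() adds another BOGO_DIRENT_SIZE per entry */
	close(open("/dev/shm/dir-demo/f", O_RDWR | O_CREAT, 0600));
	stat("/dev/shm/dir-demo", &st);
	printf("one entry: %lld\n", (long long)st.st_size);	/* 60 */
	return 0;
}
#endif
/*
 * (End of editor's aside; the original comment resumes below.)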
2050 */ 2051static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 2052{ 2053 struct inode *inode = old_dentry->d_inode; 2054 int they_are_dirs = S_ISDIR(inode->i_mode); 2055 2056 if (!simple_empty(new_dentry)) 2057 return -ENOTEMPTY; 2058 2059 if (new_dentry->d_inode) { 2060 (void) shmem_unlink(new_dir, new_dentry); 2061 if (they_are_dirs) 2062 drop_nlink(old_dir); 2063 } else if (they_are_dirs) { 2064 drop_nlink(old_dir); 2065 inc_nlink(new_dir); 2066 } 2067 2068 old_dir->i_size -= BOGO_DIRENT_SIZE; 2069 new_dir->i_size += BOGO_DIRENT_SIZE; 2070 old_dir->i_ctime = old_dir->i_mtime = 2071 new_dir->i_ctime = new_dir->i_mtime = 2072 inode->i_ctime = CURRENT_TIME; 2073 return 0; 2074} 2075 2076static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 2077{ 2078 int error; 2079 int len; 2080 struct inode *inode; 2081 struct page *page; 2082 char *kaddr; 2083 struct shmem_inode_info *info; 2084 2085 len = strlen(symname) + 1; 2086 if (len > PAGE_CACHE_SIZE) 2087 return -ENAMETOOLONG; 2088 2089 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 2090 if (!inode) 2091 return -ENOSPC; 2092 2093 error = security_inode_init_security(inode, dir, &dentry->d_name, 2094 shmem_initxattrs, NULL); 2095 if (error) { 2096 if (error != -EOPNOTSUPP) { 2097 iput(inode); 2098 return error; 2099 } 2100 error = 0; 2101 } 2102 2103 info = SHMEM_I(inode); 2104 inode->i_size = len-1; 2105 if (len <= SHORT_SYMLINK_LEN) { 2106 info->symlink = kmemdup(symname, len, GFP_KERNEL); 2107 if (!info->symlink) { 2108 iput(inode); 2109 return -ENOMEM; 2110 } 2111 inode->i_op = &shmem_short_symlink_operations; 2112 } else { 2113 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); 2114 if (error) { 2115 iput(inode); 2116 return error; 2117 } 2118 inode->i_mapping->a_ops = &shmem_aops; 2119 inode->i_op = &shmem_symlink_inode_operations; 2120 kaddr = kmap_atomic(page); 2121 memcpy(kaddr, symname, len); 2122 kunmap_atomic(kaddr); 2123 SetPageUptodate(page); 2124 set_page_dirty(page); 2125 unlock_page(page); 2126 page_cache_release(page); 2127 } 2128 dir->i_size += BOGO_DIRENT_SIZE; 2129 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 2130 d_instantiate(dentry, inode); 2131 dget(dentry); 2132 return 0; 2133} 2134 2135static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd) 2136{ 2137 nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink); 2138 return NULL; 2139} 2140 2141static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) 2142{ 2143 struct page *page = NULL; 2144 int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); 2145 nd_set_link(nd, error ? ERR_PTR(error) : kmap(page)); 2146 if (page) 2147 unlock_page(page); 2148 return page; 2149} 2150 2151static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 2152{ 2153 if (!IS_ERR(nd_get_link(nd))) { 2154 struct page *page = cookie; 2155 kunmap(page); 2156 mark_page_accessed(page); 2157 page_cache_release(page); 2158 } 2159} 2160 2161#ifdef CONFIG_TMPFS_XATTR 2162/* 2163 * Superblocks without xattr inode operations may get some security.* xattr 2164 * support from the LSM "for free". As soon as we have any other xattrs 2165 * like ACLs, we also need to implement the security.* handlers at 2166 * filesystem level, though. 2167 */ 2168 2169/* 2170 * Callback for security_inode_init_security() for acquiring xattrs. 
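 *
 * Editor's aside on the symlink paths above: whether the target lives in
 * info->symlink (kmalloc) or in page 0 of the mapping is invisible to
 * userspace, as this sketch shows (not part of this file; the path and
 * target length are assumptions).
 */
#if 0 /* userspace example, not kernel code */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char target[200], buf[256];
	ssize_t n;

	/* strlen + 1 exceeds SHORT_SYMLINK_LEN (128), so shmem_symlink()
	 * stores this target in a swappable page, not via kmemdup() */
	memset(target, 'a', 150);
	target[150] = '\0';
	symlink(target, "/dev/shm/long-link");

	n = readlink("/dev/shm/long-link", buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%zd bytes, starts %.8s...\n", n, buf);
	}
	return 0;
}
#endif
/*
 * (End of editor's aside; the original comment resumes below.)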
2171 */ 2172static int shmem_initxattrs(struct inode *inode, 2173 const struct xattr *xattr_array, 2174 void *fs_info) 2175{ 2176 struct shmem_inode_info *info = SHMEM_I(inode); 2177 const struct xattr *xattr; 2178 struct simple_xattr *new_xattr; 2179 size_t len; 2180 2181 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 2182 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 2183 if (!new_xattr) 2184 return -ENOMEM; 2185 2186 len = strlen(xattr->name) + 1; 2187 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 2188 GFP_KERNEL); 2189 if (!new_xattr->name) { 2190 kfree(new_xattr); 2191 return -ENOMEM; 2192 } 2193 2194 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 2195 XATTR_SECURITY_PREFIX_LEN); 2196 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 2197 xattr->name, len); 2198 2199 simple_xattr_list_add(&info->xattrs, new_xattr); 2200 } 2201 2202 return 0; 2203} 2204 2205static const struct xattr_handler *shmem_xattr_handlers[] = { 2206#ifdef CONFIG_TMPFS_POSIX_ACL 2207 &posix_acl_access_xattr_handler, 2208 &posix_acl_default_xattr_handler, 2209#endif 2210 NULL 2211}; 2212 2213static int shmem_xattr_validate(const char *name) 2214{ 2215 struct { const char *prefix; size_t len; } arr[] = { 2216 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, 2217 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } 2218 }; 2219 int i; 2220 2221 for (i = 0; i < ARRAY_SIZE(arr); i++) { 2222 size_t preflen = arr[i].len; 2223 if (strncmp(name, arr[i].prefix, preflen) == 0) { 2224 if (!name[preflen]) 2225 return -EINVAL; 2226 return 0; 2227 } 2228 } 2229 return -EOPNOTSUPP; 2230} 2231 2232static ssize_t shmem_getxattr(struct dentry *dentry, const char *name, 2233 void *buffer, size_t size) 2234{ 2235 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2236 int err; 2237 2238 /* 2239 * If this is a request for a synthetic attribute in the system.* 2240 * namespace use the generic infrastructure to resolve a handler 2241 * for it via sb->s_xattr. 2242 */ 2243 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2244 return generic_getxattr(dentry, name, buffer, size); 2245 2246 err = shmem_xattr_validate(name); 2247 if (err) 2248 return err; 2249 2250 return simple_xattr_get(&info->xattrs, name, buffer, size); 2251} 2252 2253static int shmem_setxattr(struct dentry *dentry, const char *name, 2254 const void *value, size_t size, int flags) 2255{ 2256 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2257 int err; 2258 2259 /* 2260 * If this is a request for a synthetic attribute in the system.* 2261 * namespace use the generic infrastructure to resolve a handler 2262 * for it via sb->s_xattr. 2263 */ 2264 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2265 return generic_setxattr(dentry, name, value, size, flags); 2266 2267 err = shmem_xattr_validate(name); 2268 if (err) 2269 return err; 2270 2271 return simple_xattr_set(&info->xattrs, name, value, size, flags); 2272} 2273 2274static int shmem_removexattr(struct dentry *dentry, const char *name) 2275{ 2276 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2277 int err; 2278 2279 /* 2280 * If this is a request for a synthetic attribute in the system.* 2281 * namespace use the generic infrastructure to resolve a handler 2282 * for it via sb->s_xattr. 
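 *
 * Editor's aside: shmem_xattr_validate() above admits only the
 * security.* and trusted.* prefixes, so user.* attributes fail on this
 * tmpfs.  A sketch (not part of this file; it assumes CONFIG_TMPFS_XATTR,
 * and the trusted.* call additionally needs CAP_SYS_ADMIN):
 */
#if 0 /* userspace example, not kernel code */
#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	/* no handler and no accepted prefix: expect EOPNOTSUPP */
	if (setxattr("/dev/shm", "user.demo", "1", 1, 0) < 0)
		perror("user.demo");

	/* passes shmem_xattr_validate(), stored by simple_xattr_set() */
	if (setxattr("/dev/shm", "trusted.demo", "1", 1, 0) < 0)
		perror("trusted.demo");
	return 0;
}
#endif
/*
 * (End of editor's aside; the original comment resumes below.)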
2283 */ 2284 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) 2285 return generic_removexattr(dentry, name); 2286 2287 err = shmem_xattr_validate(name); 2288 if (err) 2289 return err; 2290 2291 return simple_xattr_remove(&info->xattrs, name); 2292} 2293 2294static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 2295{ 2296 struct shmem_inode_info *info = SHMEM_I(dentry->d_inode); 2297 return simple_xattr_list(&info->xattrs, buffer, size); 2298} 2299#endif /* CONFIG_TMPFS_XATTR */ 2300 2301static const struct inode_operations shmem_short_symlink_operations = { 2302 .readlink = generic_readlink, 2303 .follow_link = shmem_follow_short_symlink, 2304#ifdef CONFIG_TMPFS_XATTR 2305 .setxattr = shmem_setxattr, 2306 .getxattr = shmem_getxattr, 2307 .listxattr = shmem_listxattr, 2308 .removexattr = shmem_removexattr, 2309#endif 2310}; 2311 2312static const struct inode_operations shmem_symlink_inode_operations = { 2313 .readlink = generic_readlink, 2314 .follow_link = shmem_follow_link, 2315 .put_link = shmem_put_link, 2316#ifdef CONFIG_TMPFS_XATTR 2317 .setxattr = shmem_setxattr, 2318 .getxattr = shmem_getxattr, 2319 .listxattr = shmem_listxattr, 2320 .removexattr = shmem_removexattr, 2321#endif 2322}; 2323 2324static struct dentry *shmem_get_parent(struct dentry *child) 2325{ 2326 return ERR_PTR(-ESTALE); 2327} 2328 2329static int shmem_match(struct inode *ino, void *vfh) 2330{ 2331 __u32 *fh = vfh; 2332 __u64 inum = fh[2]; 2333 inum = (inum << 32) | fh[1]; 2334 return ino->i_ino == inum && fh[0] == ino->i_generation; 2335} 2336 2337static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 2338 struct fid *fid, int fh_len, int fh_type) 2339{ 2340 struct inode *inode; 2341 struct dentry *dentry = NULL; 2342 u64 inum; 2343 2344 if (fh_len < 3) 2345 return NULL; 2346 2347 inum = fid->raw[2]; 2348 inum = (inum << 32) | fid->raw[1]; 2349 2350 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 2351 shmem_match, fid->raw); 2352 if (inode) { 2353 dentry = d_find_alias(inode); 2354 iput(inode); 2355 } 2356 2357 return dentry; 2358} 2359 2360static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 2361 struct inode *parent) 2362{ 2363 if (*len < 3) { 2364 *len = 3; 2365 return FILEID_INVALID; 2366 } 2367 2368 if (inode_unhashed(inode)) { 2369 /* Unfortunately insert_inode_hash is not idempotent, 2370 * so as we hash inodes here rather than at creation 2371 * time, we need a lock to ensure we only try 2372 * to do it once 2373 */ 2374 static DEFINE_SPINLOCK(lock); 2375 spin_lock(&lock); 2376 if (inode_unhashed(inode)) 2377 __insert_inode_hash(inode, 2378 inode->i_ino + inode->i_generation); 2379 spin_unlock(&lock); 2380 } 2381 2382 fh[0] = inode->i_generation; 2383 fh[1] = inode->i_ino; 2384 fh[2] = ((__u64)inode->i_ino) >> 32; 2385 2386 *len = 3; 2387 return 1; 2388} 2389 2390static const struct export_operations shmem_export_ops = { 2391 .get_parent = shmem_get_parent, 2392 .encode_fh = shmem_encode_fh, 2393 .fh_to_dentry = shmem_fh_to_dentry, 2394}; 2395 2396static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 2397 bool remount) 2398{ 2399 char *this_char, *value, *rest; 2400 struct mempolicy *mpol = NULL; 2401 uid_t uid; 2402 gid_t gid; 2403 2404 while (options != NULL) { 2405 this_char = options; 2406 for (;;) { 2407 /* 2408 * NUL-terminate this option: unfortunately, 2409 * mount options form a comma-separated list, 2410 * but mpol's nodelist may also contain commas. 
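 *
 * Editor's aside: the splitting rule implemented just below can be tried
 * on its own; this standalone sketch (not part of this file) mirrors the
 * loop and shows why "mpol=interleave:0,2" survives intact.
 */
#if 0 /* standalone illustration, not kernel code */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char data[] = "mpol=interleave:0,2,size=50%,nr_inodes=1000";
	char *options = data, *this_char;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			/* a comma followed by a digit is taken to be part
			 * of an mpol nodelist, not an option separator */
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (*this_char)
			printf("option: '%s'\n", this_char);
	}
	/* prints: mpol=interleave:0,2 / size=50% / nr_inodes=1000 */
	return 0;
}
#endif
/*
 * (End of editor's aside; the original comment resumes below.)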
2411 */ 2412 options = strchr(options, ','); 2413 if (options == NULL) 2414 break; 2415 options++; 2416 if (!isdigit(*options)) { 2417 options[-1] = '\0'; 2418 break; 2419 } 2420 } 2421 if (!*this_char) 2422 continue; 2423 if ((value = strchr(this_char,'=')) != NULL) { 2424 *value++ = 0; 2425 } else { 2426 printk(KERN_ERR 2427 "tmpfs: No value for mount option '%s'\n", 2428 this_char); 2429 goto error; 2430 } 2431 2432 if (!strcmp(this_char,"size")) { 2433 unsigned long long size; 2434 size = memparse(value,&rest); 2435 if (*rest == '%') { 2436 size <<= PAGE_SHIFT; 2437 size *= totalram_pages; 2438 do_div(size, 100); 2439 rest++; 2440 } 2441 if (*rest) 2442 goto bad_val; 2443 sbinfo->max_blocks = 2444 DIV_ROUND_UP(size, PAGE_CACHE_SIZE); 2445 } else if (!strcmp(this_char,"nr_blocks")) { 2446 sbinfo->max_blocks = memparse(value, &rest); 2447 if (*rest) 2448 goto bad_val; 2449 } else if (!strcmp(this_char,"nr_inodes")) { 2450 sbinfo->max_inodes = memparse(value, &rest); 2451 if (*rest) 2452 goto bad_val; 2453 } else if (!strcmp(this_char,"mode")) { 2454 if (remount) 2455 continue; 2456 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; 2457 if (*rest) 2458 goto bad_val; 2459 } else if (!strcmp(this_char,"uid")) { 2460 if (remount) 2461 continue; 2462 uid = simple_strtoul(value, &rest, 0); 2463 if (*rest) 2464 goto bad_val; 2465 sbinfo->uid = make_kuid(current_user_ns(), uid); 2466 if (!uid_valid(sbinfo->uid)) 2467 goto bad_val; 2468 } else if (!strcmp(this_char,"gid")) { 2469 if (remount) 2470 continue; 2471 gid = simple_strtoul(value, &rest, 0); 2472 if (*rest) 2473 goto bad_val; 2474 sbinfo->gid = make_kgid(current_user_ns(), gid); 2475 if (!gid_valid(sbinfo->gid)) 2476 goto bad_val; 2477 } else if (!strcmp(this_char,"mpol")) { 2478 mpol_put(mpol); 2479 mpol = NULL; 2480 if (mpol_parse_str(value, &mpol)) 2481 goto bad_val; 2482 } else { 2483 printk(KERN_ERR "tmpfs: Bad mount option %s\n", 2484 this_char); 2485 goto error; 2486 } 2487 } 2488 sbinfo->mpol = mpol; 2489 return 0; 2490 2491bad_val: 2492 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n", 2493 value, this_char); 2494error: 2495 mpol_put(mpol); 2496 return 1; 2497 2498} 2499 2500static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) 2501{ 2502 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2503 struct shmem_sb_info config = *sbinfo; 2504 unsigned long inodes; 2505 int error = -EINVAL; 2506 2507 config.mpol = NULL; 2508 if (shmem_parse_options(data, &config, true)) 2509 return error; 2510 2511 spin_lock(&sbinfo->stat_lock); 2512 inodes = sbinfo->max_inodes - sbinfo->free_inodes; 2513 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 2514 goto out; 2515 if (config.max_inodes < inodes) 2516 goto out; 2517 /* 2518 * Those tests disallow limited->unlimited while any are in use; 2519 * but we must separately disallow unlimited->limited, because 2520 * in that case we have no record of how much is already in use. 2521 */ 2522 if (config.max_blocks && !sbinfo->max_blocks) 2523 goto out; 2524 if (config.max_inodes && !sbinfo->max_inodes) 2525 goto out; 2526 2527 error = 0; 2528 sbinfo->max_blocks = config.max_blocks; 2529 sbinfo->max_inodes = config.max_inodes; 2530 sbinfo->free_inodes = config.max_inodes - inodes; 2531 2532 /* 2533 * Preserve previous mempolicy unless mpol remount option was specified. 
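 *
 * Editor's aside: the remount rules enforced above are easiest to see
 * from userspace.  A sketch (not part of this file; the mount points are
 * assumptions, the second one presumed mounted with size=0, unlimited):
 */
#if 0 /* userspace example, not kernel code */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* raising or keeping the limits of a limited instance succeeds */
	if (mount(NULL, "/dev/shm", NULL, MS_REMOUNT, "size=256m") < 0)
		perror("grow limited instance");

	/* unlimited -> limited fails with EINVAL: there is no record of
	 * how many blocks such an instance already has in use */
	if (mount(NULL, "/mnt/unlimited", NULL, MS_REMOUNT, "size=64m") < 0)
		perror("limit unlimited instance");
	return 0;
}
#endif
/*
 * (End of editor's aside; the original comment resumes below.)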
2534 */ 2535 if (config.mpol) { 2536 mpol_put(sbinfo->mpol); 2537 sbinfo->mpol = config.mpol; /* transfers initial ref */ 2538 } 2539out: 2540 spin_unlock(&sbinfo->stat_lock); 2541 return error; 2542} 2543 2544static int shmem_show_options(struct seq_file *seq, struct dentry *root) 2545{ 2546 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 2547 2548 if (sbinfo->max_blocks != shmem_default_max_blocks()) 2549 seq_printf(seq, ",size=%luk", 2550 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); 2551 if (sbinfo->max_inodes != shmem_default_max_inodes()) 2552 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 2553 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 2554 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 2555 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 2556 seq_printf(seq, ",uid=%u", 2557 from_kuid_munged(&init_user_ns, sbinfo->uid)); 2558 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 2559 seq_printf(seq, ",gid=%u", 2560 from_kgid_munged(&init_user_ns, sbinfo->gid)); 2561 shmem_show_mpol(seq, sbinfo->mpol); 2562 return 0; 2563} 2564#endif /* CONFIG_TMPFS */ 2565 2566static void shmem_put_super(struct super_block *sb) 2567{ 2568 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2569 2570 percpu_counter_destroy(&sbinfo->used_blocks); 2571 mpol_put(sbinfo->mpol); 2572 kfree(sbinfo); 2573 sb->s_fs_info = NULL; 2574} 2575 2576int shmem_fill_super(struct super_block *sb, void *data, int silent) 2577{ 2578 struct inode *inode; 2579 struct shmem_sb_info *sbinfo; 2580 int err = -ENOMEM; 2581 2582 /* Round up to L1_CACHE_BYTES to resist false sharing */ 2583 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 2584 L1_CACHE_BYTES), GFP_KERNEL); 2585 if (!sbinfo) 2586 return -ENOMEM; 2587 2588 sbinfo->mode = S_IRWXUGO | S_ISVTX; 2589 sbinfo->uid = current_fsuid(); 2590 sbinfo->gid = current_fsgid(); 2591 sb->s_fs_info = sbinfo; 2592 2593#ifdef CONFIG_TMPFS 2594 /* 2595 * Per default we only allow half of the physical ram per 2596 * tmpfs instance, limiting inodes to one per page of lowmem; 2597 * but the internal instance is left unlimited. 
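 *
 * Editor's aside: a userspace sketch of those defaults (not part of this
 * file; the target directory is an assumption):
 */
#if 0 /* userspace example, not kernel code */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* NULL data: shmem_fill_super() keeps shmem_default_max_blocks()
	 * (half of RAM) and shmem_default_max_inodes() */
	if (mount("tmpfs", "/mnt", "tmpfs", 0, NULL) < 0) {
		perror("mount");
		return 1;
	}
	/* shmem_show_options() prints only values differing from these
	 * defaults, so /proc/mounts gains no tmpfs-specific options */
	return 0;
}
#endif
/*
 * (End of editor's aside; the original comment resumes below.)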
2598 */ 2599 if (!(sb->s_flags & MS_KERNMOUNT)) { 2600 sbinfo->max_blocks = shmem_default_max_blocks(); 2601 sbinfo->max_inodes = shmem_default_max_inodes(); 2602 if (shmem_parse_options(data, sbinfo, false)) { 2603 err = -EINVAL; 2604 goto failed; 2605 } 2606 } else { 2607 sb->s_flags |= MS_NOUSER; 2608 } 2609 sb->s_export_op = &shmem_export_ops; 2610 sb->s_flags |= MS_NOSEC; 2611#else 2612 sb->s_flags |= MS_NOUSER; 2613#endif 2614 2615 spin_lock_init(&sbinfo->stat_lock); 2616 if (percpu_counter_init(&sbinfo->used_blocks, 0)) 2617 goto failed; 2618 sbinfo->free_inodes = sbinfo->max_inodes; 2619 2620 sb->s_maxbytes = MAX_LFS_FILESIZE; 2621 sb->s_blocksize = PAGE_CACHE_SIZE; 2622 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 2623 sb->s_magic = TMPFS_MAGIC; 2624 sb->s_op = &shmem_ops; 2625 sb->s_time_gran = 1; 2626#ifdef CONFIG_TMPFS_XATTR 2627 sb->s_xattr = shmem_xattr_handlers; 2628#endif 2629#ifdef CONFIG_TMPFS_POSIX_ACL 2630 sb->s_flags |= MS_POSIXACL; 2631#endif 2632 2633 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 2634 if (!inode) 2635 goto failed; 2636 inode->i_uid = sbinfo->uid; 2637 inode->i_gid = sbinfo->gid; 2638 sb->s_root = d_make_root(inode); 2639 if (!sb->s_root) 2640 goto failed; 2641 return 0; 2642 2643failed: 2644 shmem_put_super(sb); 2645 return err; 2646} 2647 2648static struct kmem_cache *shmem_inode_cachep; 2649 2650static struct inode *shmem_alloc_inode(struct super_block *sb) 2651{ 2652 struct shmem_inode_info *info; 2653 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 2654 if (!info) 2655 return NULL; 2656 return &info->vfs_inode; 2657} 2658 2659static void shmem_destroy_callback(struct rcu_head *head) 2660{ 2661 struct inode *inode = container_of(head, struct inode, i_rcu); 2662 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 2663} 2664 2665static void shmem_destroy_inode(struct inode *inode) 2666{ 2667 if (S_ISREG(inode->i_mode)) 2668 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 2669 call_rcu(&inode->i_rcu, shmem_destroy_callback); 2670} 2671 2672static void shmem_init_inode(void *foo) 2673{ 2674 struct shmem_inode_info *info = foo; 2675 inode_init_once(&info->vfs_inode); 2676} 2677 2678static int shmem_init_inodecache(void) 2679{ 2680 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 2681 sizeof(struct shmem_inode_info), 2682 0, SLAB_PANIC, shmem_init_inode); 2683 return 0; 2684} 2685 2686static void shmem_destroy_inodecache(void) 2687{ 2688 kmem_cache_destroy(shmem_inode_cachep); 2689} 2690 2691static const struct address_space_operations shmem_aops = { 2692 .writepage = shmem_writepage, 2693 .set_page_dirty = __set_page_dirty_no_writeback, 2694#ifdef CONFIG_TMPFS 2695 .write_begin = shmem_write_begin, 2696 .write_end = shmem_write_end, 2697#endif 2698 .migratepage = migrate_page, 2699 .error_remove_page = generic_error_remove_page, 2700}; 2701 2702static const struct file_operations shmem_file_operations = { 2703 .mmap = shmem_mmap, 2704#ifdef CONFIG_TMPFS 2705 .llseek = shmem_file_llseek, 2706 .read = new_sync_read, 2707 .write = new_sync_write, 2708 .read_iter = shmem_file_read_iter, 2709 .write_iter = generic_file_write_iter, 2710 .fsync = noop_fsync, 2711 .splice_read = shmem_file_splice_read, 2712 .splice_write = iter_file_splice_write, 2713 .fallocate = shmem_fallocate, 2714#endif 2715}; 2716 2717static const struct inode_operations shmem_inode_operations = { 2718 .setattr = shmem_setattr, 2719#ifdef CONFIG_TMPFS_XATTR 2720 .setxattr = shmem_setxattr, 2721 .getxattr = shmem_getxattr, 2722 .listxattr = 
shmem_listxattr, 2723 .removexattr = shmem_removexattr, 2724 .set_acl = simple_set_acl, 2725#endif 2726}; 2727 2728static const struct inode_operations shmem_dir_inode_operations = { 2729#ifdef CONFIG_TMPFS 2730 .create = shmem_create, 2731 .lookup = simple_lookup, 2732 .link = shmem_link, 2733 .unlink = shmem_unlink, 2734 .symlink = shmem_symlink, 2735 .mkdir = shmem_mkdir, 2736 .rmdir = shmem_rmdir, 2737 .mknod = shmem_mknod, 2738 .rename = shmem_rename, 2739 .tmpfile = shmem_tmpfile, 2740#endif 2741#ifdef CONFIG_TMPFS_XATTR 2742 .setxattr = shmem_setxattr, 2743 .getxattr = shmem_getxattr, 2744 .listxattr = shmem_listxattr, 2745 .removexattr = shmem_removexattr, 2746#endif 2747#ifdef CONFIG_TMPFS_POSIX_ACL 2748 .setattr = shmem_setattr, 2749 .set_acl = simple_set_acl, 2750#endif 2751}; 2752 2753static const struct inode_operations shmem_special_inode_operations = { 2754#ifdef CONFIG_TMPFS_XATTR 2755 .setxattr = shmem_setxattr, 2756 .getxattr = shmem_getxattr, 2757 .listxattr = shmem_listxattr, 2758 .removexattr = shmem_removexattr, 2759#endif 2760#ifdef CONFIG_TMPFS_POSIX_ACL 2761 .setattr = shmem_setattr, 2762 .set_acl = simple_set_acl, 2763#endif 2764}; 2765 2766static const struct super_operations shmem_ops = { 2767 .alloc_inode = shmem_alloc_inode, 2768 .destroy_inode = shmem_destroy_inode, 2769#ifdef CONFIG_TMPFS 2770 .statfs = shmem_statfs, 2771 .remount_fs = shmem_remount_fs, 2772 .show_options = shmem_show_options, 2773#endif 2774 .evict_inode = shmem_evict_inode, 2775 .drop_inode = generic_delete_inode, 2776 .put_super = shmem_put_super, 2777}; 2778 2779static const struct vm_operations_struct shmem_vm_ops = { 2780 .fault = shmem_fault, 2781 .map_pages = filemap_map_pages, 2782#ifdef CONFIG_NUMA 2783 .set_policy = shmem_set_policy, 2784 .get_policy = shmem_get_policy, 2785#endif 2786 .remap_pages = generic_file_remap_pages, 2787}; 2788 2789static struct dentry *shmem_mount(struct file_system_type *fs_type, 2790 int flags, const char *dev_name, void *data) 2791{ 2792 return mount_nodev(fs_type, flags, data, shmem_fill_super); 2793} 2794 2795static struct file_system_type shmem_fs_type = { 2796 .owner = THIS_MODULE, 2797 .name = "tmpfs", 2798 .mount = shmem_mount, 2799 .kill_sb = kill_litter_super, 2800 .fs_flags = FS_USERNS_MOUNT, 2801}; 2802 2803int __init shmem_init(void) 2804{ 2805 int error; 2806 2807 /* If rootfs called this, don't re-init */ 2808 if (shmem_inode_cachep) 2809 return 0; 2810 2811 error = bdi_init(&shmem_backing_dev_info); 2812 if (error) 2813 goto out4; 2814 2815 error = shmem_init_inodecache(); 2816 if (error) 2817 goto out3; 2818 2819 error = register_filesystem(&shmem_fs_type); 2820 if (error) { 2821 printk(KERN_ERR "Could not register tmpfs\n"); 2822 goto out2; 2823 } 2824 2825 shm_mnt = kern_mount(&shmem_fs_type); 2826 if (IS_ERR(shm_mnt)) { 2827 error = PTR_ERR(shm_mnt); 2828 printk(KERN_ERR "Could not kern_mount tmpfs\n"); 2829 goto out1; 2830 } 2831 return 0; 2832 2833out1: 2834 unregister_filesystem(&shmem_fs_type); 2835out2: 2836 shmem_destroy_inodecache(); 2837out3: 2838 bdi_destroy(&shmem_backing_dev_info); 2839out4: 2840 shm_mnt = ERR_PTR(error); 2841 return error; 2842} 2843 2844#else /* !CONFIG_SHMEM */ 2845 2846/* 2847 * tiny-shmem: simple shmemfs and tmpfs using ramfs code 2848 * 2849 * This is intended for small systems where the benefits of the full 2850 * shmem code (swap-backed and resource-limited) are outweighed by 2851 * its complexity. On systems without swap this code should be 2852 * effectively equivalent, but much lighter weight.
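 *
 * Editor's aside: either configuration satisfies the classic shared
 * anonymous mapping below; only swap-out and size accounting differ.
 * A sketch (not part of this file):
 */
#if 0 /* userspace example, not kernel code */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	/* backed by an unlinked shmem (or, here, ramfs) file, set up by
	 * shmem_zero_setup() further down in this file */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (fork() == 0) {
		strcpy(p, "hello from the child");
		_exit(0);
	}
	wait(NULL);
	printf("%s\n", p);	/* the parent sees the child's write */
	return 0;
}
#endif
/*
 * (End of editor's aside; the original comment resumes below.)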
2853 */ 2854 2855static struct file_system_type shmem_fs_type = { 2856 .name = "tmpfs", 2857 .mount = ramfs_mount, 2858 .kill_sb = kill_litter_super, 2859 .fs_flags = FS_USERNS_MOUNT, 2860}; 2861 2862int __init shmem_init(void) 2863{ 2864 BUG_ON(register_filesystem(&shmem_fs_type) != 0); 2865 2866 shm_mnt = kern_mount(&shmem_fs_type); 2867 BUG_ON(IS_ERR(shm_mnt)); 2868 2869 return 0; 2870} 2871 2872int shmem_unuse(swp_entry_t swap, struct page *page) 2873{ 2874 return 0; 2875} 2876 2877int shmem_lock(struct file *file, int lock, struct user_struct *user) 2878{ 2879 return 0; 2880} 2881 2882void shmem_unlock_mapping(struct address_space *mapping) 2883{ 2884} 2885 2886void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 2887{ 2888 truncate_inode_pages_range(inode->i_mapping, lstart, lend); 2889} 2890EXPORT_SYMBOL_GPL(shmem_truncate_range); 2891 2892#define shmem_vm_ops generic_file_vm_ops 2893#define shmem_file_operations ramfs_file_operations 2894#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 2895#define shmem_acct_size(flags, size) 0 2896#define shmem_unacct_size(flags, size) do {} while (0) 2897 2898#endif /* CONFIG_SHMEM */ 2899 2900/* common code */ 2901 2902static struct dentry_operations anon_ops = { 2903 .d_dname = simple_dname 2904}; 2905 2906static struct file *__shmem_file_setup(const char *name, loff_t size, 2907 unsigned long flags, unsigned int i_flags) 2908{ 2909 struct file *res; 2910 struct inode *inode; 2911 struct path path; 2912 struct super_block *sb; 2913 struct qstr this; 2914 2915 if (IS_ERR(shm_mnt)) 2916 return ERR_CAST(shm_mnt); 2917 2918 if (size < 0 || size > MAX_LFS_FILESIZE) 2919 return ERR_PTR(-EINVAL); 2920 2921 if (shmem_acct_size(flags, size)) 2922 return ERR_PTR(-ENOMEM); 2923 2924 res = ERR_PTR(-ENOMEM); 2925 this.name = name; 2926 this.len = strlen(name); 2927 this.hash = 0; /* will go */ 2928 sb = shm_mnt->mnt_sb; 2929 path.dentry = d_alloc_pseudo(sb, &this); 2930 if (!path.dentry) 2931 goto put_memory; 2932 d_set_d_op(path.dentry, &anon_ops); 2933 path.mnt = mntget(shm_mnt); 2934 2935 res = ERR_PTR(-ENOSPC); 2936 inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); 2937 if (!inode) 2938 goto put_dentry; 2939 2940 inode->i_flags |= i_flags; 2941 d_instantiate(path.dentry, inode); 2942 inode->i_size = size; 2943 clear_nlink(inode); /* It is unlinked */ 2944 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 2945 if (IS_ERR(res)) 2946 goto put_dentry; 2947 2948 res = alloc_file(&path, FMODE_WRITE | FMODE_READ, 2949 &shmem_file_operations); 2950 if (IS_ERR(res)) 2951 goto put_dentry; 2952 2953 return res; 2954 2955put_dentry: 2956 path_put(&path); 2957put_memory: 2958 shmem_unacct_size(flags, size); 2959 return res; 2960} 2961 2962/** 2963 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 2964 * kernel internal. There will be NO LSM permission checks against the 2965 * underlying inode. So users of this interface must do LSM checks at a 2966 * higher layer. The one user is the big_key implementation. LSM checks 2967 * are provided at the key level rather than the inode level. 
* @name: name for dentry (to be seen in /proc/<pid>/maps) 2969 * @size: size to be set for the file 2970 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 2971 */ 2972struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) 2973{ 2974 return __shmem_file_setup(name, size, flags, S_PRIVATE); 2975} 2976 2977/** 2978 * shmem_file_setup - get an unlinked file living in tmpfs 2979 * @name: name for dentry (to be seen in /proc/<pid>/maps) 2980 * @size: size to be set for the file 2981 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 2982 */ 2983struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) 2984{ 2985 return __shmem_file_setup(name, size, flags, 0); 2986} 2987EXPORT_SYMBOL_GPL(shmem_file_setup); 2988 2989/** 2990 * shmem_zero_setup - setup a shared anonymous mapping 2991 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff 2992 */ 2993int shmem_zero_setup(struct vm_area_struct *vma) 2994{ 2995 struct file *file; 2996 loff_t size = vma->vm_end - vma->vm_start; 2997 2998 file = shmem_file_setup("dev/zero", size, vma->vm_flags); 2999 if (IS_ERR(file)) 3000 return PTR_ERR(file); 3001 3002 if (vma->vm_file) 3003 fput(vma->vm_file); 3004 vma->vm_file = file; 3005 vma->vm_ops = &shmem_vm_ops; 3006 return 0; 3007} 3008 3009/** 3010 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 3011 * @mapping: the page's address_space 3012 * @index: the page index 3013 * @gfp: the page allocator flags to use if allocating 3014 * 3015 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 3016 * with any new page allocations done using the specified allocation flags. 3017 * But read_cache_page_gfp() uses the ->readpage() method, which does not 3018 * suit tmpfs, since it may have pages in swapcache, and needs to find those 3019 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 3020 * 3021 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 3022 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 3023 */ 3024struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 3025 pgoff_t index, gfp_t gfp) 3026{ 3027#ifdef CONFIG_SHMEM 3028 struct inode *inode = mapping->host; 3029 struct page *page; 3030 int error; 3031 3032 BUG_ON(mapping->a_ops != &shmem_aops); 3033 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL); 3034 if (error) 3035 page = ERR_PTR(error); 3036 else 3037 unlock_page(page); 3038 return page; 3039#else 3040 /* 3041 * The tiny !SHMEM case uses ramfs without swap 3042 */ 3043 return read_cache_page_gfp(mapping, index, gfp); 3044#endif 3045} 3046EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 3047
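/*
 * Editor's aside, closing the file: a sketch (not part of this file) of
 * the kind of caller shmem_read_mapping_page_gfp() exists for.  The
 * helper name and loop bounds are assumptions; the gfp treatment mirrors
 * the i915 usage described in the comment above.
 */
#if 0 /* example caller, not part of this file */
static int fill_page_array(struct address_space *mapping,
			   struct page **pages, pgoff_t nr)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	pgoff_t i;

	for (i = 0; i < nr; i++) {
		struct page *page =
			shmem_read_mapping_page_gfp(mapping, i, gfp);

		if (IS_ERR(page))
			return PTR_ERR(page);
		/* each page is returned with a reference held; drop it
		 * with page_cache_release() when the caller is done */
		pages[i] = page;
	}
	return 0;
}
#endif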