shmem.c revision 0552f879d45cecc35d8e372a591fc5ed863bca58
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
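/*
 * Worked example of the limits above (illustrative, taking
 * SHMEM_NR_DIRECT == 16 as in the layout comment at shmem_swp_entry()):
 * with 4kB pages and 4-byte longs (32-bit), ENTRIES_PER_PAGE is 1024, so
 * SHMSWP_MAX_INDEX = 16 + (1024*1024/2) * 1025 ~ 537 million pages
 * ~ 2.05TB: "just over 2TB".  With 8-byte longs (64-bit),
 * ENTRIES_PER_PAGE is 512, giving 16 + (512*512/2) * 513 ~ 67 million
 * pages ~ 256GB: one eighth of that.
 */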
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};
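/*
 * How these modes are used below: SGP_READ serves do_shmem_file_read,
 * shmem_follow_link and the partial-page hold in shmem_notify_change;
 * SGP_CACHE serves shmem_fault and shmem_readpage; SGP_DIRTY is chosen
 * by do_shmem_file_read for a KERNEL_DS read from a stacking filesystem,
 * so that holes it allocates get written out rather than dropped;
 * SGP_WRITE serves shmem_write_begin and shmem_symlink, which may extend
 * beyond i_size.
 */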
#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
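/*
 * An illustrative contrast (example numbers, 4kB pages): a 1MB SysV shm
 * segment created without VM_NORESERVE pre-accounts VM_ACCT(1MB) = 256
 * pages of commit at setup time and nothing per page afterwards; a 1MB
 * tmpfs file, whose inode is created with VM_NORESERVE, accounts nothing
 * up front but one page of commit in shmem_acct_block for each page
 * actually instantiated.
 */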
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
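/*
 * A worked lookup (illustrative, with the real 32-bit values
 * ENTRIES_PER_PAGE = 1024, SHMEM_NR_DIRECT = 16): for index 20000,
 * index - 16 = 19984, so offset = 19984 % 1024 = 528 and the directory
 * slot is 19984 / 1024 = 19; since 19 < ENTRIES_PER_PAGE/2 = 512 this is
 * doubly indirect, and the entry is slot 528 of the page at slot 19 of
 * i_indirect.  Only indices from 16 + 512*1024 upwards reach the triple
 * indirect second half.
 */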
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:	pointer to the directory
 * @edir:	pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
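/*
 * shmem_unuse_inode scans one inode's swap vector (direct entries, then
 * the indirect tree) for @entry; if found, it moves @page back into the
 * page cache at that index and clears the swap entry.  It returns
 * nonzero when the swaplist search can stop, 0 to continue with the
 * next inode.
 */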
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0)
		goto found;
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = igrab(&info->vfs_inode);
	spin_unlock(&info->lock);

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_delete_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);
	mutex_unlock(&shmem_swaplist_mutex);

	error = 1;
	if (!inode)
		goto out;
	/*
	 * Charge page using GFP_KERNEL while we can wait.
	 * Charged back to the user (not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	error = radix_tree_preload(GFP_KERNEL);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		goto out;
	}
	error = 1;

	spin_lock(&info->lock);
	ptr = shmem_swp_entry(info, idx, NULL);
	if (ptr && ptr->val == entry.val) {
		error = add_to_page_cache_locked(page, inode->i_mapping,
						idx, GFP_NOWAIT);
		/* does mem_cgroup_uncharge_cache_page on error */
	} else	/* we must compensate for our precharge above */
		mem_cgroup_uncharge_cache_page(page);

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(inode->i_mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	if (ptr)
		shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	radix_tree_preload_end();
out:
	unlock_page(page);
	page_cache_release(page);
	iput(inode);		/* allows for NULL */
	return error;
}

/*
 * shmem_unuse() searches for a shmem page that may have been swapped out:
 * called during swapoff to bring it back into some inode's page cache.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			goto out;
	}
	mutex_unlock(&shmem_swaplist_mutex);
	/*
	 * Can some race bring us here?  We've been holding page lock,
	 * so I think not; but would rather try again later than BUG()
	 */
	unlock_page(page);
	page_cache_release(page);
out:
	return (found < 0) ? found : 0;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.  However, in those cases,
	 * we do still want to check if there's a redundant swappage to be
	 * discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	spin_lock(&info->lock);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		if (list_empty(&info->swaplist))
			inode = igrab(inode);
		else
			inode = NULL;
		spin_unlock(&info->lock);
		swap_shmem_alloc(swap);
		BUG_ON(page_mapped(page));
		page_cache_release(page);	/* pagecache ref */
		swap_writepage(page, wbc);
		if (inode) {
			mutex_lock(&shmem_swaplist_mutex);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
			iput(inode);
		}
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	/*
	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
	 * clear SWAP_HAS_CACHE flag.
	 */
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage (required for splice) passes in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	gfp = mapping_gfp_mask(mapping);
	if (!filepage) {
		/*
		 * Try to preload while we can wait, to not make a habit of
		 * draining atomic reserves; but don't latch on to this cpu.
		 */
		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
		if (error)
			goto failed;
		radix_tree_preload_end();
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
					idx, GFP_NOWAIT))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			filepage = swappage;
			set_page_dirty(filepage);
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/*
				 * reclaim from proper memory cgroup and
				 * call memcg's OOM if needed.
				 */
				error = mem_cgroup_shmem_charge_fallback(
								swappage,
								current->mm,
								gfp);
				if (error) {
					unlock_page(swappage);
					page_cache_release(swappage);
					goto failed;
				}
			}
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			int ret;

			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(gfp, info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}
			SetPageSwapBacked(filepage);

			/* Precharge page while we can wait, compensate after */
			error = mem_cgroup_cache_charge(filepage, current->mm,
					GFP_KERNEL);
			if (error) {
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			ret = error || swap.val;
			if (ret)
				mem_cgroup_uncharge_cache_page(filepage);
			else
				ret = add_to_page_cache_lru(filepage, mapping,
						idx, GFP_NOWAIT);
			/*
			 * At add_to_page_cache_lru() failure, uncharge will
			 * be done automatically.
			 */
			if (ret) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
		if (sgp == SGP_DIRTY)
			set_page_dirty(filepage);
	}
done:
	*pagep = filepage;
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	return ret | VM_FAULT_LOCKED;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, int mode,
					dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
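		/*
		 * Zero the shmem-specific fields, all of which precede the
		 * vfs_inode embedded at the end of shmem_inode_info: the
		 * byte count is the offset of vfs_inode within the struct.
		 */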
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
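/*
 * Two symlink flavours result from the above: a target short enough to
 * fit in the shmem_inode_info area before vfs_inode is stored there and
 * read back with a simple cast in shmem_follow_link_inline; a longer
 * target lives in page 0 of the file, so shmem_follow_link must kmap
 * that page and shmem_put_link kunmap it afterwards.
 */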
1916 */ 1917static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 1918{ 1919 struct inode *inode = old_dentry->d_inode; 1920 int they_are_dirs = S_ISDIR(inode->i_mode); 1921 1922 if (!simple_empty(new_dentry)) 1923 return -ENOTEMPTY; 1924 1925 if (new_dentry->d_inode) { 1926 (void) shmem_unlink(new_dir, new_dentry); 1927 if (they_are_dirs) 1928 drop_nlink(old_dir); 1929 } else if (they_are_dirs) { 1930 drop_nlink(old_dir); 1931 inc_nlink(new_dir); 1932 } 1933 1934 old_dir->i_size -= BOGO_DIRENT_SIZE; 1935 new_dir->i_size += BOGO_DIRENT_SIZE; 1936 old_dir->i_ctime = old_dir->i_mtime = 1937 new_dir->i_ctime = new_dir->i_mtime = 1938 inode->i_ctime = CURRENT_TIME; 1939 return 0; 1940} 1941 1942static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 1943{ 1944 int error; 1945 int len; 1946 struct inode *inode; 1947 struct page *page = NULL; 1948 char *kaddr; 1949 struct shmem_inode_info *info; 1950 1951 len = strlen(symname) + 1; 1952 if (len > PAGE_CACHE_SIZE) 1953 return -ENAMETOOLONG; 1954 1955 inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 1956 if (!inode) 1957 return -ENOSPC; 1958 1959 error = security_inode_init_security(inode, dir, NULL, NULL, 1960 NULL); 1961 if (error) { 1962 if (error != -EOPNOTSUPP) { 1963 iput(inode); 1964 return error; 1965 } 1966 error = 0; 1967 } 1968 1969 info = SHMEM_I(inode); 1970 inode->i_size = len-1; 1971 if (len <= (char *)inode - (char *)info) { 1972 /* do it inline */ 1973 memcpy(info, symname, len); 1974 inode->i_op = &shmem_symlink_inline_operations; 1975 } else { 1976 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL); 1977 if (error) { 1978 iput(inode); 1979 return error; 1980 } 1981 inode->i_mapping->a_ops = &shmem_aops; 1982 inode->i_op = &shmem_symlink_inode_operations; 1983 kaddr = kmap_atomic(page, KM_USER0); 1984 memcpy(kaddr, symname, len); 1985 kunmap_atomic(kaddr, KM_USER0); 1986 set_page_dirty(page); 1987 unlock_page(page); 1988 page_cache_release(page); 1989 } 1990 if (dir->i_mode & S_ISGID) 1991 inode->i_gid = dir->i_gid; 1992 dir->i_size += BOGO_DIRENT_SIZE; 1993 dir->i_ctime = dir->i_mtime = CURRENT_TIME; 1994 d_instantiate(dentry, inode); 1995 dget(dentry); 1996 return 0; 1997} 1998 1999static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) 2000{ 2001 nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); 2002 return NULL; 2003} 2004 2005static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) 2006{ 2007 struct page *page = NULL; 2008 int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); 2009 nd_set_link(nd, res ? 
        if (page)
                unlock_page(page);
        return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
        if (!IS_ERR(nd_get_link(nd))) {
                struct page *page = cookie;
                kunmap(page);
                mark_page_accessed(page);
                page_cache_release(page);
        }
}

static const struct inode_operations shmem_symlink_inline_operations = {
        .readlink = generic_readlink,
        .follow_link = shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
        .truncate = shmem_truncate,
        .readlink = generic_readlink,
        .follow_link = shmem_follow_link,
        .put_link = shmem_put_link,
};

#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static size_t shmem_xattr_security_list(struct inode *inode, char *list,
                                        size_t list_len, const char *name,
                                        size_t name_len)
{
        return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
                                    void *buffer, size_t size)
{
        if (strcmp(name, "") == 0)
                return -EINVAL;
        return xattr_getsecurity(inode, name, buffer, size);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
                                    const void *value, size_t size, int flags)
{
        if (strcmp(name, "") == 0)
                return -EINVAL;
        return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .list   = shmem_xattr_security_list,
        .get    = shmem_xattr_security_get,
        .set    = shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
        &shmem_xattr_acl_access_handler,
        &shmem_xattr_acl_default_handler,
        &shmem_xattr_security_handler,
        NULL
};
#endif

static struct dentry *shmem_get_parent(struct dentry *child)
{
        return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
        __u32 *fh = vfh;
        __u64 inum = fh[2];
        inum = (inum << 32) | fh[1];
        return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
                struct fid *fid, int fh_len, int fh_type)
{
        struct inode *inode;
        struct dentry *dentry = NULL;
        u64 inum;

        /* Check the handle length before trusting any of fid->raw[] */
        if (fh_len < 3)
                return NULL;

        inum = fid->raw[2];
        inum = (inum << 32) | fid->raw[1];

        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
                        shmem_match, fid->raw);
        if (inode) {
                dentry = d_find_alias(inode);
                iput(inode);
        }

        return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
                                int connectable)
{
        struct inode *inode = dentry->d_inode;

        if (*len < 3)
                return 255;

        if (hlist_unhashed(&inode->i_hash)) {
                /* Unfortunately insert_inode_hash is not idempotent,
                 * so as we hash inodes here rather than at creation
                 * time, we need a lock to ensure we only try
                 * to do it once
                 */
                static DEFINE_SPINLOCK(lock);
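                /*
                 * Recheck under the lock: another encode_fh() may have
                 * hashed this inode between the unlocked
                 * hlist_unhashed() test above and here, and
                 * __insert_inode_hash() must only run once.
                 */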
                spin_lock(&lock);
                if (hlist_unhashed(&inode->i_hash))
                        __insert_inode_hash(inode,
                                            inode->i_ino + inode->i_generation);
                spin_unlock(&lock);
        }

        fh[0] = inode->i_generation;
        fh[1] = inode->i_ino;
        fh[2] = ((__u64)inode->i_ino) >> 32;

        *len = 3;
        return 1;
}

static const struct export_operations shmem_export_ops = {
        .get_parent     = shmem_get_parent,
        .encode_fh      = shmem_encode_fh,
        .fh_to_dentry   = shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                               bool remount)
{
        char *this_char, *value, *rest;

        while (options != NULL) {
                this_char = options;
                for (;;) {
                        /*
                         * NUL-terminate this option: unfortunately,
                         * mount options form a comma-separated list,
                         * but mpol's nodelist may also contain commas.
                         */
                        options = strchr(options, ',');
                        if (options == NULL)
                                break;
                        options++;
                        if (!isdigit(*options)) {
                                options[-1] = '\0';
                                break;
                        }
                }
                if (!*this_char)
                        continue;
                if ((value = strchr(this_char,'=')) != NULL) {
                        *value++ = 0;
                } else {
                        printk(KERN_ERR
                            "tmpfs: No value for mount option '%s'\n",
                            this_char);
                        return 1;
                }

                if (!strcmp(this_char,"size")) {
                        unsigned long long size;
                        size = memparse(value,&rest);
                        if (*rest == '%') {
                                size <<= PAGE_SHIFT;
                                size *= totalram_pages;
                                do_div(size, 100);
                                rest++;
                        }
                        if (*rest)
                                goto bad_val;
                        sbinfo->max_blocks =
                                DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
                } else if (!strcmp(this_char,"nr_blocks")) {
                        sbinfo->max_blocks = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"nr_inodes")) {
                        sbinfo->max_inodes = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mode")) {
                        if (remount)
                                continue;
                        sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"uid")) {
                        if (remount)
                                continue;
                        sbinfo->uid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"gid")) {
                        if (remount)
                                continue;
                        sbinfo->gid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mpol")) {
                        if (mpol_parse_str(value, &sbinfo->mpol, 1))
                                goto bad_val;
                } else {
                        printk(KERN_ERR "tmpfs: Bad mount option %s\n",
                               this_char);
                        return 1;
                }
        }
        return 0;

bad_val:
        printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
               value, this_char);
        return 1;

}

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        struct shmem_sb_info config = *sbinfo;
        unsigned long blocks;
        unsigned long inodes;
        int error = -EINVAL;

        if (shmem_parse_options(data, &config, true))
                return error;

        spin_lock(&sbinfo->stat_lock);
        blocks = sbinfo->max_blocks - sbinfo->free_blocks;
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
        if (config.max_blocks < blocks)
                goto out;
        if (config.max_inodes < inodes)
                goto out;
        /*
         * Those tests also disallow limited->unlimited while any are in
         * use, so i_blocks will always be zero when max_blocks is zero;
         * but we must separately disallow unlimited->limited, because
         * in that case we have no record of how much is already in use.
         */
        if (config.max_blocks && !sbinfo->max_blocks)
                goto out;
        if (config.max_inodes && !sbinfo->max_inodes)
                goto out;

        error = 0;
        sbinfo->max_blocks  = config.max_blocks;
        sbinfo->free_blocks = config.max_blocks - blocks;
        sbinfo->max_inodes  = config.max_inodes;
        sbinfo->free_inodes = config.max_inodes - inodes;

        mpol_put(sbinfo->mpol);
        sbinfo->mpol = config.mpol;     /* transfers initial ref */
out:
        spin_unlock(&sbinfo->stat_lock);
        return error;
}

static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

        if (sbinfo->max_blocks != shmem_default_max_blocks())
                seq_printf(seq, ",size=%luk",
                        sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
        if (sbinfo->max_inodes != shmem_default_max_inodes())
                seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
        if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
                seq_printf(seq, ",mode=%03o", sbinfo->mode);
        if (sbinfo->uid != 0)
                seq_printf(seq, ",uid=%u", sbinfo->uid);
        if (sbinfo->gid != 0)
                seq_printf(seq, ",gid=%u", sbinfo->gid);
        shmem_show_mpol(seq, sbinfo->mpol);
        return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
        kfree(sb->s_fs_info);
        sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct dentry *root;
        struct shmem_sb_info *sbinfo;
        int err = -ENOMEM;

        /* Round up to L1_CACHE_BYTES to resist false sharing */
        sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
                                L1_CACHE_BYTES), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;

        sbinfo->mode = S_IRWXUGO | S_ISVTX;
        sbinfo->uid = current_fsuid();
        sbinfo->gid = current_fsgid();
        sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
        /*
         * By default we only allow half of the physical ram per
         * tmpfs instance, limiting inodes to one per page of lowmem;
         * but the internal instance is left unlimited.
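         *
         * As a concrete illustration (assuming 4kB pages and 1GB of
         * RAM, none of it highmem): the defaults work out to
         * max_blocks = totalram_pages / 2 = 131072 blocks, i.e. the
         * equivalent of mounting with "size=50%", and max_inodes =
         * min(totalram_pages - totalhigh_pages, totalram_pages / 2)
         * = 131072 as well.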
         */
        if (!(sb->s_flags & MS_NOUSER)) {
                sbinfo->max_blocks = shmem_default_max_blocks();
                sbinfo->max_inodes = shmem_default_max_inodes();
                if (shmem_parse_options(data, sbinfo, false)) {
                        err = -EINVAL;
                        goto failed;
                }
        }
        sb->s_export_op = &shmem_export_ops;
#else
        sb->s_flags |= MS_NOUSER;
#endif

        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->free_blocks = sbinfo->max_blocks;
        sbinfo->free_inodes = sbinfo->max_inodes;

        sb->s_maxbytes = SHMEM_MAX_BYTES;
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = TMPFS_MAGIC;
        sb->s_op = &shmem_ops;
        sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
        sb->s_xattr = shmem_xattr_handlers;
        sb->s_flags |= MS_POSIXACL;
#endif

        inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
        if (!inode)
                goto failed;
        inode->i_uid = sbinfo->uid;
        inode->i_gid = sbinfo->gid;
        root = d_alloc_root(inode);
        if (!root)
                goto failed_iput;
        sb->s_root = root;
        return 0;

failed_iput:
        iput(inode);
failed:
        shmem_put_super(sb);
        return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
        struct shmem_inode_info *p;
        p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
        if (!p)
                return NULL;
        return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
        if ((inode->i_mode & S_IFMT) == S_IFREG) {
                /* only struct inode is valid if it's an inline symlink */
                mpol_free_shared_policy(&SHMEM_I(inode)->policy);
        }
        kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo)
{
        struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

        inode_init_once(&p->vfs_inode);
}

static int init_inodecache(void)
{
        shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
                                sizeof(struct shmem_inode_info),
                                0, SLAB_PANIC, init_once);
        return 0;
}

static void destroy_inodecache(void)
{
        kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
        .writepage = shmem_writepage,
        .set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
        .readpage = shmem_readpage,
        .write_begin = shmem_write_begin,
        .write_end = shmem_write_end,
#endif
        .migratepage = migrate_page,
        .error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
        .mmap = shmem_mmap,
#ifdef CONFIG_TMPFS
        .llseek = generic_file_llseek,
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = shmem_file_aio_read,
        .aio_write = generic_file_aio_write,
        .fsync = simple_sync_file,
        .splice_read = generic_file_splice_read,
        .splice_write = generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
        .truncate = shmem_truncate,
        .setattr = shmem_notify_change,
        .truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setxattr = generic_setxattr,
        .getxattr = generic_getxattr,
        .listxattr = generic_listxattr,
        .removexattr = generic_removexattr,
        .check_acl = shmem_check_acl,
#endif

};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
        .create = shmem_create,
        .lookup = simple_lookup,
        .link = shmem_link,
        .unlink = shmem_unlink,
        .symlink = shmem_symlink,
        .mkdir = shmem_mkdir,
        .rmdir = shmem_rmdir,
        .mknod = shmem_mknod,
        .rename = shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr = shmem_notify_change,
        .setxattr = generic_setxattr,
        .getxattr = generic_getxattr,
        .listxattr = generic_listxattr,
        .removexattr = generic_removexattr,
        .check_acl = shmem_check_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr = shmem_notify_change,
        .setxattr = generic_setxattr,
        .getxattr = generic_getxattr,
        .listxattr = generic_listxattr,
        .removexattr = generic_removexattr,
        .check_acl = shmem_check_acl,
#endif
};

static const struct super_operations shmem_ops = {
        .alloc_inode = shmem_alloc_inode,
        .destroy_inode = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
        .statfs = shmem_statfs,
        .remount_fs = shmem_remount_fs,
        .show_options = shmem_show_options,
#endif
        .delete_inode = shmem_delete_inode,
        .drop_inode = generic_delete_inode,
        .put_super = shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
        .fault = shmem_fault,
#ifdef CONFIG_NUMA
        .set_policy = shmem_set_policy,
        .get_policy = shmem_get_policy,
#endif
};


static int shmem_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
        return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}

static struct file_system_type tmpfs_fs_type = {
        .owner = THIS_MODULE,
        .name = "tmpfs",
        .get_sb = shmem_get_sb,
        .kill_sb = kill_litter_super,
};

int __init init_tmpfs(void)
{
        int error;

        error = bdi_init(&shmem_backing_dev_info);
        if (error)
                goto out4;

        error = init_inodecache();
        if (error)
                goto out3;

        error = register_filesystem(&tmpfs_fs_type);
        if (error) {
                printk(KERN_ERR "Could not register tmpfs\n");
                goto out2;
        }

        shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
                                tmpfs_fs_type.name, NULL);
        if (IS_ERR(shm_mnt)) {
                error = PTR_ERR(shm_mnt);
                printk(KERN_ERR "Could not kern_mount tmpfs\n");
                goto out1;
        }
        return 0;

out1:
        unregister_filesystem(&tmpfs_fs_type);
out2:
        destroy_inodecache();
out3:
        bdi_destroy(&shmem_backing_dev_info);
out4:
        shm_mnt = ERR_PTR(error);
        return error;
}

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * its complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
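 *
 * Concretely, in the fallback definitions below: tmpfs mounts go
 * straight to ramfs_get_sb(), shmem_file_operations aliases
 * ramfs_file_operations, and shmem_unuse() and shmem_lock() become
 * no-ops, since without the full shmem code there is no swap to
 * unuse and nothing to account.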
 */

#include <linux/ramfs.h>

static struct file_system_type tmpfs_fs_type = {
        .name = "tmpfs",
        .get_sb = ramfs_get_sb,
        .kill_sb = kill_litter_super,
};

int __init init_tmpfs(void)
{
        BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);

        shm_mnt = kern_mount(&tmpfs_fs_type);
        BUG_ON(IS_ERR(shm_mnt));

        return 0;
}

int shmem_unuse(swp_entry_t entry, struct page *page)
{
        return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
        return 0;
}

#define shmem_vm_ops                            generic_file_vm_ops
#define shmem_file_operations                   ramfs_file_operations
#define shmem_get_inode(sb, mode, dev, flags)   ramfs_get_inode(sb, mode, dev)
#define shmem_acct_size(flags, size)            0
#define shmem_unacct_size(flags, size)          do {} while (0)
#define SHMEM_MAX_BYTES                         MAX_LFS_FILESIZE

#endif /* CONFIG_SHMEM */

/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
        int error;
        struct file *file;
        struct inode *inode;
        struct path path;
        struct dentry *root;
        struct qstr this;

        if (IS_ERR(shm_mnt))
                return (void *)shm_mnt;

        if (size < 0 || size > SHMEM_MAX_BYTES)
                return ERR_PTR(-EINVAL);

        if (shmem_acct_size(flags, size))
                return ERR_PTR(-ENOMEM);

        error = -ENOMEM;
        this.name = name;
        this.len = strlen(name);
        this.hash = 0; /* will go */
        root = shm_mnt->mnt_root;
        path.dentry = d_alloc(root, &this);
        if (!path.dentry)
                goto put_memory;
        path.mnt = mntget(shm_mnt);

        error = -ENOSPC;
        inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
        if (!inode)
                goto put_dentry;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        inode->i_nlink = 0;     /* It is unlinked */
#ifndef CONFIG_MMU
        error = ramfs_nommu_expand_for_mapping(inode, size);
        if (error)
                goto put_dentry;
#endif

        error = -ENFILE;
        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                  &shmem_file_operations);
        if (!file)
                goto put_dentry;

        return file;

put_dentry:
        path_put(&path);
put_memory:
        shmem_unacct_size(flags, size);
        return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
        struct file *file;
        loff_t size = vma->vm_end - vma->vm_start;

        file = shmem_file_setup("dev/zero", size, vma->vm_flags);
        if (IS_ERR(file))
                return PTR_ERR(file);

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
        return 0;
}
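
/*
 * Usage sketch for shmem_file_setup() (illustrative only; the name
 * "scratch" and the one-megabyte size are arbitrary):
 *
 *	struct file *filp;
 *
 *	filp = shmem_file_setup("scratch", 1 << 20, VM_NORESERVE);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	... use filp as an unlinked, swap-backed scratch file ...
 *	fput(filp);
 *
 * Passing VM_NORESERVE, as shmem_zero_setup() does implicitly when the
 * vma carries that flag, defers swap accounting until pages are
 * actually instantiated rather than charging the whole object up front.
 */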