shmem.c revision b5a84319a4343a0db753436fd8147e61eaafa7ea
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);
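/*
 * Illustrative note (added in this cleanup, not in the original source):
 * a worked example of the capacity macros above, assuming a 4K
 * PAGE_CACHE_SIZE, a 4-byte unsigned long and SHMEM_NR_DIRECT == 16:
 *
 *	ENTRIES_PER_PAGE     = 4096 / 4           = 1024
 *	ENTRIES_PER_PAGEPAGE = 1024 * 1024        = 1048576
 *	SHMEM_MAX_INDEX      = 16 + 524288 * 1025 = 537395216 pages
 *	SHMEM_MAX_BYTES      = 537395216 << 12    ~= 2.0 TiB
 *
 * On 64-bit, sizeof(unsigned long) == 8 halves ENTRIES_PER_PAGE.
 */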
static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT) ?
		security_vm_enough_memory_kern(VM_ACCT(size)) : 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
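/*
 * Illustrative note (added in this cleanup): the two schemes above are
 * complementary. An object created with VM_ACCOUNT set (e.g. via
 * shmem_file_setup, 1MB in size) charges VM_ACCT(1MB) == 256 pages
 * (assuming 4K pages) once, up front, and shmem_acct_block then charges
 * nothing per page; a plain tmpfs file (VM_ACCOUNT clear) charges nothing
 * up front, but one page at a time as blocks are actually allocated.
 */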
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
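/*
 * Illustrative note (added in this cleanup): with info->alloced == 10,
 * info->swapped == 2 and nrpages == 6, shmem_recalc_inode computes
 * freed == 10 - 2 - 6 == 2: two clean hole pages were reclaimed behind
 * our back, so two blocks are unaccounted and returned to free_blocks.
 */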
/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	       +-> 28-31
 * 	      |	       +-> 32-35
 * 	      |	       +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 		       +-> 44-47
 * 		       +-> 48-51
 * 		       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
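/*
 * Illustrative note (added in this cleanup): tracing shmem_swp_entry for
 * index 30 in the artificial layout above (ENTRIES_PER_PAGE == 4,
 * SHMEM_NR_DIRECT == 16):
 *
 *	index -= 16	-> 14
 *	offset = 14 % 4	-> 2
 *	index = 14 / 4	-> 3, which is >= ENTRIES_PER_PAGE/2, so this
 *			   is a triple indirect lookup
 *	index -= 2	-> 1
 *	dir += 2 + 1/4	-> third slot of i_indirect, i.e. dir2
 *	index %= 4	-> 1, the second subdir of dir2, holding 28-31
 *
 * The returned pointer is entry 2 of that subdir page: page index 30.
 */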
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:	pointer to the directory
 * @edir:	pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;
	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}
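/*
 * Illustrative note (added in this cleanup): shmem_truncate_range serves
 * two callers. Ordinary truncation passes end == (loff_t)-1, so everything
 * from i_size upward is released and next_index is lowered; hole punching
 * passes a bounded [start, end], leaves next_index intact, and must keep
 * taking info->lock while editing the swap directory, as described above.
 */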
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0)
		goto found;
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = igrab(&info->vfs_inode);
	spin_unlock(&info->lock);

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_delete_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);
	mutex_unlock(&shmem_swaplist_mutex);

	error = 1;
	if (!inode)
		goto out;
	/*
	 * Charge page using GFP_KERNEL while we can wait.
	 * Charged back to the user (not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	error = radix_tree_preload(GFP_KERNEL);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		goto out;
	}
	error = 1;

	spin_lock(&info->lock);
	ptr = shmem_swp_entry(info, idx, NULL);
	if (ptr && ptr->val == entry.val) {
		error = add_to_page_cache_locked(page, inode->i_mapping,
						idx, GFP_NOWAIT);
		/* does mem_cgroup_uncharge_cache_page on error */
	} else	/* we must compensate for our precharge above */
		mem_cgroup_uncharge_cache_page(page);

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(inode->i_mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	if (ptr)
		shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	radix_tree_preload_end();
out:
	unlock_page(page);
	page_cache_release(page);
	iput(inode);		/* allows for NULL */
	return error;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			goto out;
	}
	mutex_unlock(&shmem_swaplist_mutex);
out:	return found;	/* 0 or 1 or -ENOMEM */
}
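/*
 * Illustrative note (added in this cleanup): shmem_unuse_inode returns 0
 * to keep the swapoff scan going, 1 once the entry has been found and the
 * page reinstated in the page cache ("error = 1" above means found, not
 * failure), or a -ve errno such as -ENOMEM if charging or preload failed.
 */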
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for pdflush or sync.  However, in those cases, we do still
	 * want to check if there's a redundant swappage to be discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	spin_lock(&info->lock);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		if (list_empty(&info->swaplist))
			inode = igrab(inode);
		else
			inode = NULL;
		spin_unlock(&info->lock);
		swap_duplicate(swap);
		BUG_ON(page_mapped(page));
		page_cache_release(page);	/* pagecache ref */
		set_page_dirty(page);
		unlock_page(page);
		if (inode) {
			mutex_lock(&shmem_swaplist_mutex);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
			iput(inode);
		}
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
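/*
 * Illustrative note (added in this cleanup): on the successful path above
 * the ordering is: allocate a swap slot, move the page from page cache to
 * swap cache, record the swp_entry_t in the inode's swap vector, then
 * swap_duplicate() so the entry stays valid until swapoff or truncation.
 * Any failure falls through to redirty, leaving the page in the page cache.
 */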
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage (required for splice) passes in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	gfp = mapping_gfp_mask(mapping);
	if (!filepage) {
		/*
		 * Try to preload while we can wait, to not make a habit of
		 * draining atomic reserves; but don't latch on to this cpu.
		 */
		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
		if (error)
			goto failed;
		radix_tree_preload_end();
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
					idx, GFP_NOWAIT))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			filepage = swappage;
			set_page_dirty(filepage);
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/* allow reclaim from this memory cgroup */
				error = mem_cgroup_shrink_usage(swappage,
								current->mm,
								gfp);
				if (error) {
					unlock_page(swappage);
					page_cache_release(swappage);
					goto failed;
				}
			}
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			int ret;

			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(gfp, info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}
			SetPageSwapBacked(filepage);

			/* Precharge page while we can wait, compensate after */
			error = mem_cgroup_cache_charge(filepage, current->mm,
					GFP_KERNEL);
			if (error) {
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			ret = error || swap.val;
			if (ret)
				mem_cgroup_uncharge_cache_page(filepage);
			else
				ret = add_to_page_cache_lru(filepage, mapping,
						idx, GFP_NOWAIT);
			/*
			 * At add_to_page_cache_lru() failure, uncharge will
			 * be done automatically.
			 */
			if (ret) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
		if (sgp == SGP_DIRTY)
			set_page_dirty(filepage);
	}
done:
	*pagep = filepage;
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
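/*
 * Illustrative note (added in this cleanup): how the sgp_type modes are
 * used by callers in this file: SGP_READ for do_shmem_file_read and
 * follow_link (never allocate, never exceed i_size), SGP_CACHE for
 * shmem_fault and shmem_readpage, SGP_DIRTY for kernel-space reads below
 * a stacking filesystem, and SGP_WRITE for shmem_write_begin and symlink
 * bodies, which may extend beyond the current i_size.
 */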
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	return ret | VM_FAULT_LOCKED;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile,
 * and below the loop driver, in the generic fashion that many filesystems
 * support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	unlock_page(page);
	set_page_dirty(page);
	page_cache_release(page);

	return copied;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		unlock_page(page);
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};

#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return xattr_getsecurity(inode, name, buffer, size);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	/* validate the handle length before touching its contents */
	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}
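
/*
 * Illustrative layout sketch (not part of the original source): the
 * file handle shared by shmem_match(), shmem_fh_to_dentry() and
 * shmem_encode_fh() below is three 32-bit words:
 *
 *	fh[0] = i_generation
 *	fh[1] = i_ino, low 32 bits
 *	fh[2] = i_ino, high 32 bits
 *
 * The 64-bit inumber is rebuilt as ((__u64)fh[2] << 32) | fh[1], and
 * the generation check guards against a recycled inode number
 * matching a stale handle.
 */
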
static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3)
		return 255;

	if (hlist_unhashed(&inode->i_hash)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (hlist_unhashed(&inode->i_hash))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			sbinfo->uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			sbinfo->gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
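
/*
 * Usage example (illustrative, not part of the original source): the
 * options parsed above map onto mount invocations such as
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10k,mode=1777 tmpfs /mnt
 *	mount -o remount,size=2g /mnt
 *
 * "size" and "nr_inodes" go through memparse(), so k/m/g suffixes
 * work; a trailing '%' on "size" means that percentage of
 * totalram_pages. "mode", "uid" and "gid" are deliberately skipped on
 * remount: only the block/inode limits and mpol may be changed then.
 */
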
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (config.max_blocks < blocks)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks = config.max_blocks;
	sbinfo->free_blocks = config.max_blocks - blocks;
	sbinfo->max_inodes = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	mpol_put(sbinfo->mpol);
	sbinfo->mpol = config.mpol;	/* transfers initial ref */
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03o", sbinfo->mode);
	if (sbinfo->uid != 0)
		seq_printf(seq, ",uid=%u", sbinfo->uid);
	if (sbinfo->gid != 0)
		seq_printf(seq, ",gid=%u", sbinfo->gid);
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
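
/*
 * Illustrative consequences (not part of the original source) of the
 * checks in shmem_remount_fs():
 *
 *	mount -o remount,size=10m /mnt	# -EINVAL if >10m already in use
 *	mount -o remount,size=0 /mnt	# -EINVAL while any blocks in use
 *	mount -o remount,size=1g /mnt	# -EINVAL if currently unlimited
 *
 * The last case is refused because an unlimited instance keeps no
 * usage counts, so there is no baseline from which to recompute
 * free_blocks.
 */
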
static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->max_blocks = 0;
	sbinfo->max_inodes = 0;
	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sbinfo->mpol = NULL;
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * By default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->free_blocks = sbinfo->max_blocks;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	shmem_acl_destroy_inode(inode);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
	p->i_acl = NULL;
	p->i_default_acl = NULL;
#endif
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, init_once);
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
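
/*
 * Design note (illustrative, not part of the original source):
 * init_once() is a slab constructor, so it runs when a slab page is
 * first populated, not on every kmem_cache_alloc(); only state that
 * must stay valid across free/alloc cycles belongs in it. And since
 * SLAB_PANIC makes kmem_cache_create() panic rather than fail,
 * init_inodecache() can unconditionally return 0.
 */
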
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.readpage	= shmem_readpage,
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= simple_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};

static int __init init_tmpfs(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
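
/*
 * Design note (illustrative, not part of the original source): the
 * out1..out4 labels in init_tmpfs() unwind in exact reverse order of
 * initialization, the usual kernel cleanup-ladder pattern: each
 * failure path undoes only the steps that already succeeded, and
 * shm_mnt is left holding ERR_PTR(error) so that a later
 * shmem_file_setup() caller sees the failure instead of a NULL mount.
 */
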
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type tmpfs_fs_type = {
	.name		= "tmpfs",
	.get_sb		= ramfs_get_sb,
	.kill_sb	= kill_litter_super,
};

static int __init init_tmpfs(void)
{
	BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);

	shm_mnt = kern_mount(&tmpfs_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}

int shmem_unuse(swp_entry_t entry, struct page *page)
{
	return 0;
}

#define shmem_file_operations ramfs_file_operations
#define shmem_vm_ops generic_file_vm_ops
#define shmem_get_inode ramfs_get_inode
#define shmem_acct_size(a, b) 0
#define shmem_unacct_size(a, b) do {} while (0)
#define SHMEM_MAX_BYTES LLONG_MAX

#endif /* CONFIG_SHMEM */

/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: vm_flags
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

#ifdef CONFIG_SHMEM
	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
#endif
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);

#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto close_file;
#endif
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

module_init(init_tmpfs)
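
/*
 * Usage sketch (illustrative, not part of the original source): an
 * in-kernel caller obtains an unlinked tmpfs file and releases it
 * with fput() when done:
 *
 *	struct file *filp = shmem_file_setup("my-buffer", 65536, 0);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	...
 *	fput(filp);
 *
 * shmem_zero_setup() above is the MAP_SHARED|MAP_ANONYMOUS path:
 * do_mmap_pgoff() calls it so the vma ends up backed by such an
 * unlinked "dev/zero" file rather than by a real device node.
 */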