shmem.c revision d3602444e1e3485890eea5f61366e19a287c00c4
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
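
/*
 * Worked example (illustrative): with 4kB pages and a 32-bit unsigned
 * long, ENTRIES_PER_PAGE is 1024; with SHMEM_NR_DIRECT == 16 (as in
 * the swap vector layout comment below), SHMEM_MAX_INDEX is
 * 16 + (1024*1024/2) * 1025 ~= 537 million pages, so SHMEM_MAX_BYTES
 * comes to roughly 2TB per file.  On 64-bit, sizeof(unsigned long)
 * doubles and these limits shrink accordingly.
 */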

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
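
/*
 * Worked example (illustrative), tracing shmem_swp_entry for index 30
 * with the artificial ENTRIES_PER_PAGE = 4, SHMEM_NR_DIRECT = 16 of
 * the layout above: index becomes 30 - 16 = 14, offset = 14 % 4 = 2,
 * index = 14 / 4 = 3.  Since 3 >= ENTRIES_PER_PAGE/2 we are in the
 * triple indirect half: index -= 2 leaves 1, dir advances to slot 2
 * of i_indirect (dir2), and slot 1 of dir2 is the page holding
 * entries 28-31; the entry returned is at offset 2 within it, the
 * swap entry for page index 30.
 */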

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * If it does not exist allocate the entry.
 *
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
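
/*
 * shmem_truncate_range - free the pages and swap entries backing a range
 *
 * Orientation summary: handles complete truncation (end == (loff_t)-1,
 * via shmem_truncate below) as well as hole-punching an arbitrary
 * range.  Direct entries are freed first, then the indirect directory
 * pages are walked; emptied directory pages are collected on a local
 * list and freed at the end, with swap entries released in batches by
 * shmem_free_swp / shmem_map_and_free_swp.
 */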
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * we want to do nothing when that underlying filesystem is tmpfs
	 * (writing out to swap is useful as a response to memory pressure, but
	 * of no use to stabilize the data) - just redirty the page, unlock it
	 * and claim success in this case.  AOP_WRITEPAGE_ACTIVATE, and the
	 * page_mapped check below, must be avoided unless we're in reclaim.
	 */
	if (!wbc->for_reclaim) {
		set_page_dirty(page);
		unlock_page(page);
		return 0;
	}
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}

#ifdef CONFIG_NUMA
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	char *nodelist = strchr(value, ':');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate policy string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, *policy_nodes))
			goto out;
		if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
			goto out;
	}
	if (!strcmp(value, "default")) {
		*policy = MPOL_DEFAULT;
		/* Don't allow a nodelist */
		if (!nodelist)
			err = 0;
	} else if (!strcmp(value, "prefer")) {
		*policy = MPOL_PREFERRED;
		/* Insist on a nodelist of one node only */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
	} else if (!strcmp(value, "bind")) {
		*policy = MPOL_BIND;
		/* Insist on a nodelist */
		if (nodelist)
			err = 0;
	} else if (!strcmp(value, "interleave")) {
		*policy = MPOL_INTERLEAVE;
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			*policy_nodes = node_states[N_HIGH_MEMORY];
		err = 0;
	}
out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	return err;
}
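
/*
 * The two helpers below construct a throwaway pseudo vma on the stack
 * so that the NUMA allocators, which take their mempolicy from a vma,
 * can be driven by the inode's shared policy: only the fields those
 * paths look at (vm_start, vm_pgoff, vm_ops, vm_policy) are set up.
 */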
static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	page = swapin_readahead(entry, gfp, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	page = alloc_page_vma(gfp, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy,
						nodemask_t *policy_nodes)
{
	return 1;
}

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage and shmem_write_begin pass in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
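	/*
	 * The lock-juggling below follows one retry protocol: whenever
	 * info->lock must be dropped to sleep (swap-in, page allocation,
	 * waiting on a locked or writeback page), the inode may change
	 * under us, so state is re-verified afterwards and any mismatch
	 * restarts from "repeat".
	 */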
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	gfp = mapping_gfp_mask(mapping);

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(gfp, info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	*pagep = filepage;
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	mark_page_accessed(vmf->page);
	return ret | VM_FAULT_LOCKED;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
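		/*
		 * Zero only the shmem-private head of the struct here:
		 * vfs_inode is the last member of shmem_inode_info, so
		 * the byte distance from info to inode covers exactly
		 * the fields which precede it.
		 */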
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy, sbinfo->policy,
							&sbinfo->policy_nodes);
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
						NULL);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	unlock_page(page);
	set_page_dirty(page);
	page_cache_release(page);

	return copied;
}

static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	loff_t pos;
	unsigned long written;
	ssize_t err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	mutex_lock(&inode->i_mutex);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		unlock_page(page);
		left = bytes;
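		/*
		 * On highmem pages we must not fault while holding an
		 * atomic kmap, so touch the first and last user bytes
		 * to prefault them, then try the no-fault atomic copy;
		 * anything left uncopied falls through to the sleeping
		 * kmap()/__copy_from_user() path below.
		 */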
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
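			/*
			 * shmem_getpage with SGP_READ returns a NULL
			 * page for a hole: substitute the kernel's
			 * zero page rather than allocating, so reads
			 * of sparse files cost nothing.
			 */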
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		unlock_page(page);
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};

#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_getsecurity(inode, name, buffer, size,
					  -EOPNOTSUPP);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.list   = shmem_xattr_security_list,
	.get    = shmem_xattr_security_get,
	.set    = shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}
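
/*
 * The NFS file handle built by shmem_encode_fh below is three words:
 * fh[0] = i_generation, fh[1] = low 32 bits of i_ino, fh[2] = high
 * 32 bits of i_ino.  shmem_match and shmem_fh_to_dentry reassemble
 * and verify the same triple at lookup time.
 */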
static int shmem_parse_options(char *options, int *mode, uid_t *uid,
	gid_t *gid, unsigned long *blocks, unsigned long *inodes,
	int *policy, nodemask_t *policy_nodes)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (shmem_parse_mpol(value,policy,policy_nodes))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
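/*
 * Example (illustrative only, not kernel code): given
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10k,mpol=bind:0-1,2 tmpfs /mnt
 *
 * the scan above leaves "mpol=bind:0-1,2" intact: the comma before '2'
 * is followed by a digit and so is treated as part of the nodelist, not
 * as an option separator.  "size=50%" is converted to a block count of
 * half of totalram_pages.
 */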
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	int policy = sbinfo->policy;
	nodemask_t policy_nodes = sbinfo->policy_nodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
				&max_inodes, &policy, &policy_nodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
#endif

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;
	int policy = MPOL_DEFAULT;
	nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
					&inodes, &policy, &policy_nodes))
			return -EINVAL;
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	shmem_acl_destroy_inode(inode);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
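/*
 * Object lifecycle (descriptive note, simplified): shmem inodes come from
 * shmem_inode_cachep; init_once() below runs when the slab constructs an
 * object, not on every allocation, while shmem_destroy_inode() above must
 * release the shared mempolicy and any ACLs before the object is recycled.
 */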
static void init_once(struct kmem_cache *cachep, void *foo)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
	p->i_acl = NULL;
	p->i_default_acl = NULL;
#endif
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, init_once);
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.readpage	= shmem_readpage,
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}
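/*
 * get_sb_nodev() gives each mount its own anonymous superblock, calling
 * back into shmem_fill_super(); tmpfs has no backing device, so separate
 * mounts are fully independent instances with their own limits.
 */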
static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)

/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: vm_flags, of which only VM_ACCOUNT is currently significant
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
			&shmem_file_operations);
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}

/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
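/*
 * Caller sketch (hypothetical, for illustration only): SysV shared memory
 * and shared anonymous mmap both sit on top of the helpers above, roughly:
 *
 *	struct file *filp = shmem_file_setup("SYSV00000000", size, VM_ACCOUNT);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *
 * The file is created unlinked (i_nlink == 0), so it vanishes once the
 * last reference is dropped; the name is only ever visible in
 * /proc/<pid>/maps.
 */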