shmem.c revision 39655164405940d4818224a085e35420e2f97aed
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits,
 * which makes it a completely usable filesystem.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
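
/*
 * For a rough sense of scale (illustrative only, the real values are
 * config dependent): with 4kB pages and 4-byte swap entries,
 * ENTRIES_PER_PAGE is 1024, so SHMEM_MAX_INDEX works out to
 * 16 + (1024*1024/2)*1025 pages, and SHMEM_MAX_BYTES to roughly 2TB
 * per file.
 */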

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
	SGP_FAULT,	/* same as SGP_CACHE, return with page locked */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
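 *
 * (With 4kB pages those sixteen direct entries cover the first 64kB
 * of a file before i_indirect comes into play.)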
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	       +-> 28-31
 * 	      |	       +-> 32-35
 * 	      |	       +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 	       	       +-> 44-47
 * 	       	       +-> 48-51
 * 	       	       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
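		 * (The block consumed here is handed back via
		 * shmem_free_blocks below if the page allocation fails,
		 * or if another task raced us and supplied the needed
		 * index page first.)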
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
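	/*
	 * next_index records one beyond the highest index ever allocated,
	 * so there is nothing to free once idx is at or above it.
	 */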
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
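	/*
	 * The search below walks the same structure that shmem_swp_entry
	 * describes: the direct entries first, then the indirect tree.
	 */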
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}

/*
 * Move the page from the page cache to the swap cache.
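 *
 * Called on a locked, unmapped page: on success the page has moved to
 * the swap cache and is unlocked, while AOP_WRITEPAGE_ACTIVATE hands
 * it back to the caller still locked.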
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}

#ifdef CONFIG_NUMA
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	char *nodelist = strchr(value, ':');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate policy string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, *policy_nodes))
			goto out;
		if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
			goto out;
	}
	if (!strcmp(value, "default")) {
		*policy = MPOL_DEFAULT;
		/* Don't allow a nodelist */
		if (!nodelist)
			err = 0;
	} else if (!strcmp(value, "prefer")) {
		*policy = MPOL_PREFERRED;
		/* Insist on a nodelist of one node only */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
	} else if (!strcmp(value, "bind")) {
		*policy = MPOL_BIND;
		/* Insist on a nodelist */
		if (nodelist)
			err = 0;
	} else if (!strcmp(value, "interleave")) {
		*policy = MPOL_INTERLEAVE;
		/*
		 * Default to online nodes with memory if no nodelist
		 */
		if (!nodelist)
			*policy_nodes = node_states[N_HIGH_MEMORY];
		err = 0;
	}
out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	return err;
}

static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

static struct page *shmem_swapin(struct shmem_inode_info *info,
				 swp_entry_t entry, unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
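		/*
		 * Prime the swap cache with the neighbouring cluster of
		 * swap pages: each reference is dropped straight away,
		 * and the target page itself is (re)read after the loop.
		 */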
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy,
				   nodemask_t *policy_nodes)
{
	return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp | __GFP_ZERO);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage and shmem_write_begin pass in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
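		/*
		 * The page is (or was) out on swap: pull it back through
		 * the swap cache, restarting from "repeat" whenever
		 * info->lock had to be dropped and things may have changed.
		 */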
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info,
						    idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		*pagep = filepage;
		if (sgp != SGP_FAULT)
			unlock_page(filepage);
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
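
/*
 * Typical shmem_getpage calling pattern (see shmem_fault and
 * shmem_write_begin below): pass in *pagep == NULL and receive back a
 * referenced, uptodate page; SGP_FAULT additionally leaves the page
 * locked for the fault handler.
 */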

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_FAULT, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	mark_page_accessed(vmf->page);
	return ret | VM_FAULT_LOCKED;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy, sbinfo->policy,
							&sbinfo->policy_nodes);
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
						NULL);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	set_page_dirty(page);
	page_cache_release(page);

	if (pos+copied > inode->i_size)
		i_size_write(inode, pos+copied);

	return copied;
}

static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	loff_t pos;
	unsigned long written;
	ssize_t err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	mutex_lock(&inode->i_mutex);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
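		 * (set_page_dirty skips that accounting because of the
		 * BDI_CAP_NO_ACCT_DIRTY capability set on
		 * shmem_backing_dev_info above.)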
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
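	 * (Each new link therefore takes one inode from the count below,
	 * returned by shmem_unlink when the extra link goes away.)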
	 */
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};

#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_getsecurity(inode, name, buffer, size,
					  -EOPNOTSUPP);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.list   = shmem_xattr_security_list,
	.get    = shmem_xattr_security_get,
	.set    = shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];


#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_getsecurity(inode, name, buffer, size,
					  -EOPNOTSUPP);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	/* Validate the handle length before trusting fid->raw[] */
	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3)
		return 255;

	if (hlist_unhashed(&inode->i_hash)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (hlist_unhashed(&inode->i_hash))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};
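
#if 0
/*
 * Editor's sketch (not in the original source): the 3-word NFS file
 * handle layout that shmem_encode_fh produces and shmem_match /
 * shmem_fh_to_dentry consume, written out as hypothetical helpers.
 * The inode is hashed at (i_ino + i_generation), which is why the
 * lookup above passes (inum + fid->raw[0]) to ilookup5.
 */
static void shmem_fh_pack(__u32 *fh, struct inode *inode)
{
	fh[0] = inode->i_generation;
	fh[1] = (__u32)inode->i_ino;			/* low 32 bits */
	fh[2] = (__u32)(((__u64)inode->i_ino) >> 32);	/* high 32 bits */
}

static __u64 shmem_fh_inum(const __u32 *fh)
{
	return ((__u64)fh[2] << 32) | fh[1];
}
#endif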

static int shmem_parse_options(char *options, int *mode, uid_t *uid,
	gid_t *gid, unsigned long *blocks, unsigned long *inodes,
	int *policy, nodemask_t *policy_nodes)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char, '=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char, "size")) {
			unsigned long long size;
			size = memparse(value, &rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char, "nr_blocks")) {
			*blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "nr_inodes")) {
			*inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value, &rest, 8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char, "mpol")) {
			if (shmem_parse_mpol(value, policy, policy_nodes))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
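
/*
 * Editor's examples (not in the original source) of option strings the
 * parser above accepts.  The isdigit() test is what keeps a comma
 * inside an mpol nodelist from being taken as an option separator:
 *
 *	size=50%			half of physical RAM
 *	size=512m,nr_inodes=8192,mode=1777
 *	uid=1000,gid=1000
 *	mpol=interleave:0-2,5,size=1g	"0-2,5" stays with mpol, since
 *					'5' is a digit; splitting
 *					resumes at "size"
 */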

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	int policy = sbinfo->policy;
	nodemask_t policy_nodes = sbinfo->policy_nodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
				&max_inodes, &policy, &policy_nodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
#endif

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}
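
/*
 * Editor's illustration (not in the original source) of the remount
 * rules above, as hypothetical mount commands:
 *
 *	mount -t tmpfs -o size=10m tmpfs /mnt
 *	mount -o remount,size=20m /mnt	# ok: raise the limit
 *	mount -o remount,size=0 /mnt	# ok only while no blocks or
 *					# inodes are in use; else EINVAL
 *	mount -t tmpfs -o size=0 tmpfs /mnt2
 *	mount -o remount,size=10m /mnt2	# EINVAL: unlimited->limited,
 *					# no record of current usage
 */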

static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;
	int policy = MPOL_DEFAULT;
	nodemask_t policy_nodes = node_states[N_HIGH_MEMORY];

#ifdef CONFIG_TMPFS
	/*
	 * By default we allow only half of the physical RAM per tmpfs
	 * instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
					&inodes, &policy, &policy_nodes))
			return -EINVAL;
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	shmem_acl_destroy_inode(inode);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(struct kmem_cache *cachep, void *foo)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
	p->i_acl = NULL;
	p->i_default_acl = NULL;
#endif
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, init_once);
	return 0;
}

static void destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.readpage	= shmem_readpage,
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.permission	= shmem_permission,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)

/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	init_file(file, shm_mnt, dentry, FMODE_WRITE | FMODE_READ,
			&shmem_file_operations);
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
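
#if 0
/*
 * Editor's sketch (not in the original source) of the calling pattern,
 * loosely modelled on the SYSV shm code; the name and function are
 * hypothetical.  The returned file is already unlinked, so dropping the
 * last reference with fput() frees everything.
 */
static struct file *example_shm_file(loff_t size)
{
	return shmem_file_setup("SYSVexample", size, VM_ACCOUNT);
}
#endif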

/*
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
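
/*
 * Editor's note (not in the original source): shmem_zero_setup is what
 * backs a userspace mapping like
 *
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * do_mmap_pgoff reaches it for the MAP_SHARED case, so such pages live
 * in this filesystem and can be written to swap, unlike private
 * anonymous mappings.
 */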