shmem.c revision d15c023b44e5d323f1f4130b85d29f08e43433b1
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)

#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
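	 *
	 * Note: on configurations where PAGE_CACHE_SHIFT equals
	 * PAGE_SHIFT this is an order-0, single-page allocation.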
	 */
	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
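 *
 * For example, assuming SHMEM_NR_DIRECT == 16 and 4kB pages, swap
 * entries for the first 16 pages of a file, i.e. its first 64kB,
 * fit in i_direct and need no index page at all.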
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	       +-> 28-31
 * 	      |	       +-> 32-35
 * 	      |	       +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 	 	       +-> 44-47
 * 	 	       +-> 48-51
 * 	 	       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist allocate the entry.
 *
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
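		 * With only one free block left, spending it on an index
		 * page would leave nothing for the data page the caller
		 * actually needs.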
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:   pointer to the directory
 * @edir:  pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir,
		int offset, int limit, struct page ***dir)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole = 0;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		info->next_index = idx;
	} else {
		limit = (end + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		if (limit > info->next_index)
			limit = info->next_index;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
	}
	if (!topdir)
		goto done2;

	BUG_ON(limit <= SHMEM_NR_DIRECT);
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset) {
				*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			*dir = NULL;
			nr_pages_to_free++;
			list_add(&middir->lru, &pages_to_free);
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		subdir = dir[diroff];
		if (subdir && page_private(subdir)) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset)
				spin_lock(&info->lock);
			set_page_private(subdir, page_private(subdir) - freed);
			if (offset)
				spin_unlock(&info->lock);
			if (!punch_hole)
				BUG_ON(page_private(subdir) > offset);
		}
		if (offset)
			offset = 0;
		else if (subdir && !page_private(subdir)) {
			dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
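	 * They are freed here, outside info->lock, by shmem_free_pages,
	 * which cond_resched()s every LATENCY_LIMIT pages to keep
	 * scheduling latency down.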
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}

/*
 * Move the page from the page cache to the swap cache.
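 *
 * Called by page reclaim via the writepage method; if no swap slot
 * can be got, the page is redirtied and AOP_WRITEPAGE_ACTIVATE is
 * returned so that reclaim keeps the page and moves on.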
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}

#ifdef CONFIG_NUMA
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	char *nodelist = strchr(value, ':');
	int err = 1;

	if (nodelist) {
		/* NUL-terminate policy string */
		*nodelist++ = '\0';
		if (nodelist_parse(nodelist, *policy_nodes))
			goto out;
	}
	if (!strcmp(value, "default")) {
		*policy = MPOL_DEFAULT;
		/* Don't allow a nodelist */
		if (!nodelist)
			err = 0;
	} else if (!strcmp(value, "prefer")) {
		*policy = MPOL_PREFERRED;
		/* Insist on a nodelist of one node only */
		if (nodelist) {
			char *rest = nodelist;
			while (isdigit(*rest))
				rest++;
			if (!*rest)
				err = 0;
		}
	} else if (!strcmp(value, "bind")) {
		*policy = MPOL_BIND;
		/* Insist on a nodelist */
		if (nodelist)
			err = 0;
	} else if (!strcmp(value, "interleave")) {
		*policy = MPOL_INTERLEAVE;
		/* Default to nodes online if no nodelist */
		if (!nodelist)
			*policy_nodes = node_online_map;
		err = 0;
	}
out:
	/* Restore string for error message */
	if (nodelist)
		*--nodelist = ':';
	return err;
}

static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
{
	return 1;
}

static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp | __GFP_ZERO);
}
#endif

/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and the page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_prepare_write passes in a locked filepage,
	 * which may be found not uptodate by other callers too,
	 * and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/*
		 * Look it up and read it in..
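		 * First try the swap cache; on a miss, shmem_swapin
		 * reads it in, with readahead around the target entry.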
		 */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (type && *type == VM_FAULT_MINOR) {
				inc_page_state(pgmajfault);
				*type = VM_FAULT_MAJOR;
			}
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageSwapCache(swappage)) {
			/* Page migration has occurred */
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				blk_congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info,
						    idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		unlock_page(filepage);
		*pagep = filepage;
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}

struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = NULL;
	unsigned long idx;
	int error;

	idx = (address - vma->vm_start) >> PAGE_SHIFT;
	idx += vma->vm_pgoff;
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return NOPAGE_SIGBUS;

	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
	if (error)
		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

	mark_page_accessed(page);
	return page;
}

static int shmem_populate(struct vm_area_struct *vma,
	unsigned long addr, unsigned long len,
	pgprot_t prot, unsigned long pgoff, int nonblock)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct mm_struct *mm = vma->vm_mm;
	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
	unsigned long size;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
		return -EINVAL;

	while ((long) len > 0) {
		struct page *page = NULL;
		int err;
		/*
		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
		 */
		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
		if (err)
			return err;
		/* Page may still be null, but only if nonblock was set. */
		if (page) {
			mark_page_accessed(page);
			err = install_page(mm, vma, addr, page, prot);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else if (vma->vm_flags & VM_NONLINEAR) {
			/*
			 * No page was found just because we can't read it in
			 * now (being here implies nonblock != 0), but the page
			 * may exist, so set the PTE to fault it in later.
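			 * install_file_pte encodes pgoff in a not-present
			 * pte, so the later fault can locate the right
			 * page of this nonlinear mapping.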
			 */
			err = install_file_pte(mm, vma, addr, pgoff, prot);
			if (err)
				return err;
		}

		len -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	}
	return 0;
}

#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}

static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy, sbinfo->policy,
							&sbinfo->policy_nodes);
			break;
		case S_IFDIR:
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
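			 * An inline symlink keeps its target text in the
			 * shmem_inode_info area itself, and
			 * shmem_destroy_inode frees the shared policy only
			 * for S_IFREG inodes: so the rbtree must stay empty.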
			 */
			mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
						NULL);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}

#ifdef CONFIG_TMPFS
static struct inode_operations shmem_symlink_inode_operations;
static struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs makes no use of shmem_prepare_write, but it
 * lets a tmpfs file be used read-write below the loop driver.
 */
static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}

static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	loff_t pos;
	unsigned long written;
	ssize_t err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	mutex_lock(&inode->i_mutex);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
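		 * shmem_backing_dev_info above is marked
		 * BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, so there
		 * is nothing here for the usual dirty balancing to flush.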
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}

static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
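		 *
		 * For a plain read(2) the actor is file_read_actor,
		 * which copies the page to the user buffer;
		 * shmem_file_sendfile passes its own actor in.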
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_shmem_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
			error = 0;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	dir->i_nlink++;
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink++;
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink--;
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	dir->i_nlink--;
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			old_dir->i_nlink--;
	} else if (they_are_dirs) {
		old_dir->i_nlink--;
		new_dir->i_nlink++;
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

static struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};

static int shmem_parse_options(char *options, int *mode, uid_t *uid,
	gid_t *gid, unsigned long *blocks, unsigned long *inodes,
	int *policy, nodemask_t *policy_nodes)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (shmem_parse_mpol(value,policy,policy_nodes))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;

}

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	int policy = sbinfo->policy;
	nodemask_t policy_nodes = sbinfo->policy_nodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL, &max_blocks,
				&max_inodes, &policy, &policy_nodes))
		return error;

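	/*
	 * Sample current usage under stat_lock, so the new limits can be
	 * checked against what is already in use: e.g. a remount with a
	 * smaller size than is currently occupied must fail with -EINVAL.
	 */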
	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
#endif

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode   = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;
	int policy = MPOL_DEFAULT;
	nodemask_t policy_nodes = node_online_map;

#ifdef CONFIG_TMPFS
	/*
	 * By default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
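	 *
	 * For example, on a machine with 1GB of RAM a user mount
	 * defaults to size=512M, and nr_inodes to the number of
	 * lowmem pages, capped at the number of blocks.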
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid, &blocks,
					&inodes, &policy, &policy_nodes))
			return -EINVAL;
	}
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;
	sbinfo->policy = policy;
	sbinfo->policy_nodes = policy_nodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo, struct kmem_cache *cachep,
		      unsigned long flags)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&p->vfs_inode);
	}
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, 0, init_once, NULL);
	if (shmem_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	if (kmem_cache_destroy(shmem_inode_cachep))
		printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
}

static struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
#ifdef CONFIG_TMPFS
	.prepare_write	= shmem_prepare_write,
	.commit_write	= simple_commit_write,
#endif
};

static struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.sendfile	= shmem_file_sendfile,
#endif
};

static struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
};

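/*
 * Directory methods are only needed when the filesystem is user
 * mountable (CONFIG_TMPFS): the internal MS_NOUSER instance creates
 * its files directly via shmem_get_inode(), never through these
 * entry points, so the table may compile to empty.
 */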
static struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
};

static struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.nopage		= shmem_nopage,
	.populate	= shmem_populate,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}
#ifdef CONFIG_TMPFS
	devfs_mk_dir("shm");
#endif
	shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)

/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	file->f_vfsmnt = mntget(shm_mnt);
	file->f_dentry = dentry;
	file->f_mapping = inode->i_mapping;
	file->f_op = &shmem_file_operations;
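	/*
	 * The inode is unlinked and reachable only through this struct
	 * file, so grant both read and write regardless of mode bits.
	 */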
	file->f_mode = FMODE_WRITE | FMODE_READ;
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}

/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
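/*
 * Illustrative sketch only, not part of this file: how a caller on the
 * mmap path might use shmem_zero_setup() to back a shared anonymous
 * mapping.  The real caller is do_mmap_pgoff() in mm/mmap.c; the helper
 * name below is hypothetical, hence the #if 0 guard.
 */
#if 0
static int example_setup_shared_anon(struct vm_area_struct *vma)
{
	int err;

	/*
	 * Back the vma with an unlinked tmpfs file so its pages can be
	 * shared across fork and written out to swap under pressure.
	 */
	err = shmem_zero_setup(vma);
	if (err)
		return err;	/* -EINVAL, -ENOMEM, -ENFILE or -ENOSPC */
	return 0;
}
#endif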