shmem.c revision 6daa0e28627abf362138244a620a821a9027d816
/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This file is released under the GPL.
 */

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/vfs.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/* This magic number is used in glibc for posix shared memory */
#define TMPFS_MAGIC	0x01021994

#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)

#define SHMEM_MAX_INDEX  (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMEM_MAX_BYTES  ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
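/*
 * Worked example of the limit above (illustrative numbers): with 4 KB
 * pages, SHMEM_NR_DIRECT == 16 and a 4-byte unsigned long,
 * ENTRIES_PER_PAGE is 1024, so SHMEM_MAX_INDEX is
 * 16 + (1024*1024/2) * 1025 ~= 537 million pages, putting
 * SHMEM_MAX_BYTES at roughly 2 TB; with an 8-byte unsigned long the
 * same arithmetic gives roughly 256 GB.
 */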
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Keep swapped page count in private field of indirect struct page */
#define nr_swapped	 private

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_QUICK,	/* don't try more than file page cache lookup */
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 */
	return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_ACCOUNT)?
		security_vm_enough_memory(VM_ACCT(size)): 0;
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (flags & VM_ACCOUNT)
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_ACCOUNT)?
		0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_ACCOUNT))
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
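/*
 * Example of the two schemes (illustrative numbers): a 1 MB shared
 * anonymous mapping with VM_ACCOUNT set charges all 256 pages (4 KB
 * each) up front through shmem_acct_size(); a 1 MB sparse tmpfs file
 * charges nothing up front, and each page actually allocated later is
 * charged one at a time through shmem_acct_block().
 */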
static struct super_operations shmem_ops;
static struct address_space_operations shmem_aops;
static struct file_operations shmem_file_operations;
static struct inode_operations shmem_inode_operations;
static struct inode_operations shmem_dir_inode_operations;
static struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_SPINLOCK(shmem_swaplist_lock);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/*
 * shmem_recalc_inode - recalculate the size of an inode
 *
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
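/*
 * For instance (illustrative numbers): with info->alloced 10,
 * info->swapped 3 and nrpages 5, two hole pages were reclaimed behind
 * our back, so freed is 2 and both the memory accounting and the
 * superblock's free block count are adjusted by 2.
 */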
/*
 * shmem_swp_entry - find the swap vector position in the info structure
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 * 	      |	     +-> 20-23
 * 	      |
 * 	      +-->dir2 --> 24-27
 * 	      |	       +-> 28-31
 * 	      |	       +-> 32-35
 * 	      |	       +-> 36-39
 * 	      |
 * 	      +-->dir3 --> 40-43
 * 	       	       +-> 44-47
 * 	       	       +-> 48-51
 * 	       	       +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
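/*
 * Walking the artificial layout above (ENTRIES_PER_PAGE == 4,
 * SHMEM_NR_DIRECT == 16) for page index 30: index becomes 14,
 * offset is 14 % 4 == 2 and index becomes 14 / 4 == 3; since
 * 3 >= 2 we are in the triple-indirect half and land on dir2,
 * whose second block covers pages 28-31; entry 2 of that block
 * is page 30.
 */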
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
		kmap_atomic_to_page(entry)->nr_swapped += incdec;
}

/*
 * shmem_swp_alloc - get the position of the swap entry for the page.
 *                   If it does not exist, allocate it.
 *
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @sgp:   check and recheck i_size? skip allocation?
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
		if (page) {
			page->nr_swapped = 0;
		}
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/*
 * shmem_free_swp - free some swap entries in a directory
 *
 * @dir:  pointer to the directory
 * @edir: pointer after last entry of the directory
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir,
		int offset, int limit, struct page ***dir)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}
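/*
 * E.g. clearing a fully populated 1024-entry index page above proceeds
 * as 16 batches of LATENCY_LIMIT (64) entries, dropping the atomic
 * kmaps and calling cond_resched() between batches whenever a
 * reschedule is pending.
 */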
static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	limit = info->next_index;
	info->next_index = idx;
	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
	}
	if (!topdir)
		goto done2;

	BUG_ON(limit <= SHMEM_NR_DIRECT);
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset) {
				*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			*dir = NULL;
			nr_pages_to_free++;
			list_add(&middir->lru, &pages_to_free);
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		subdir = dir[diroff];
		if (subdir && subdir->nr_swapped) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset)
				spin_lock(&info->lock);
			subdir->nr_swapped -= freed;
			if (offset)
				spin_unlock(&info->lock);
			BUG_ON(subdir->nr_swapped > offset);
		}
		if (offset)
			offset = 0;
		else if (subdir) {
			dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 */
		truncate_inode_pages(inode->i_mapping, inode->i_size);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			list_del_init(&info->swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
	}
	BUG_ON(inode->i_blocks);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	clear_inode(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && subdir->nr_swapped) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
			shmem_swp_unmap(ptr);
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = &info->vfs_inode;
	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr + offset, 0);
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	/*
	 * Decrement swap count even when the entry is left behind:
	 * try_to_unuse will skip over mms, then reincrement count.
	 */
	swap_free(entry);
	return 1;
}

/*
 * shmem_unuse() searches for a swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	spin_lock(&shmem_swaplist_lock);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		if (!info->swapped)
			list_del_init(&info->swaplist);
		else if (shmem_unuse_inode(info, entry, page)) {
			/* move head to start search for next from here */
			list_move_tail(&shmem_swaplist, &info->swaplist);
			found = 1;
			break;
		}
	}
	spin_unlock(&shmem_swaplist_lock);
	return found;
}
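/*
 * shmem_unuse() is how swapoff pulls tmpfs pages back in: for each
 * swap entry still in use, try_to_unuse() in mm/swapfile.c offers the
 * page it has read from swap to every inode on shmem_swaplist, until
 * one of them recognizes the entry and takes the page back into its
 * own page cache.
 */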
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	BUG_ON(page_mapped(page));

	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	BUG_ON(!entry);
	BUG_ON(entry->val);

	if (move_to_swap_cache(page, swap) == 0) {
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		if (list_empty(&info->swaplist)) {
			spin_lock(&shmem_swaplist_lock);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			spin_unlock(&shmem_swaplist_lock);
		}
		unlock_page(page);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	swap_free(swap);
redirty:
	set_page_dirty(page);
	return WRITEPAGE_ACTIVATE;	/* Return with the page locked */
}

#ifdef CONFIG_NUMA
static struct page *shmem_swapin_async(struct shared_policy *p,
				       swp_entry_t entry, unsigned long idx)
{
	struct page *page;
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_end = PAGE_SIZE;
	pvma.vm_pgoff = idx;
	pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
	page = read_swap_cache_async(entry, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}

struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
			  unsigned long idx)
{
	struct shared_policy *p = &info->policy;
	int i, num;
	struct page *page;
	unsigned long offset;

	num = valid_swaphandles(entry, &offset);
	for (i = 0; i < num; offset++, i++) {
		page = shmem_swapin_async(p,
				swp_entry(swp_type(entry), offset), idx);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return shmem_swapin_async(p, entry, idx);
}

static struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info,
		 unsigned long idx)
{
	struct vm_area_struct pvma;
	struct page *page;

	memset(&pvma, 0, sizeof(struct vm_area_struct));
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
	pvma.vm_pgoff = idx;
	pvma.vm_end = PAGE_SIZE;
	page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
	mpol_free(pvma.vm_policy);
	return page;
}
#else
static inline struct page *
shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry, unsigned long idx)
{
	swapin_readahead(entry, 0, NULL);
	return read_swap_cache_async(entry, NULL, 0);
}

static inline struct page *
shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp | __GFP_ZERO);
}
#endif
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap and page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_prepare_write passes in a locked filepage,
	 * which may be found not uptodate by other callers too,
	 * and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	if (sgp == SGP_QUICK)
		goto failed;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (type && *type == VM_FAULT_MINOR) {
				inc_page_state(pgmajfault);
				*type = VM_FAULT_MAJOR;
			}
			swappage = shmem_swapin(info, swap, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (TestSetPageLocked(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = move_from_swap_cache(
				swappage, idx, mapping))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			filepage = swappage;
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			if (error == -ENOMEM) {
				/* let kswapd refresh zone for GFP_ATOMICs */
				blk_congestion_wait(WRITE, HZ/50);
			}
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
						    info,
						    idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			if (error || swap.val || 0 != add_to_page_cache_lru(
					filepage, mapping, idx, GFP_ATOMIC)) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
	}
done:
	if (*pagep != filepage) {
		unlock_page(filepage);
		*pagep = filepage;
	}
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page = NULL;
	unsigned long idx;
	int error;

	idx = (address - vma->vm_start) >> PAGE_SHIFT;
	idx += vma->vm_pgoff;
	idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
	if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return NOPAGE_SIGBUS;

	error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
	if (error)
		return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;

	mark_page_accessed(page);
	return page;
}
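/*
 * Example of the index arithmetic above (illustrative numbers, 4 KB
 * pages): a fault at vm_start + 0x5000 in a mapping with vm_pgoff 3
 * gives idx = 5 + 3 = 8; the final shift is a no-op wherever
 * PAGE_CACHE_SHIFT == PAGE_SHIFT.
 */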
static int shmem_populate(struct vm_area_struct *vma,
	unsigned long addr, unsigned long len,
	pgprot_t prot, unsigned long pgoff, int nonblock)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct mm_struct *mm = vma->vm_mm;
	enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
	unsigned long size;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
		return -EINVAL;

	while ((long) len > 0) {
		struct page *page = NULL;
		int err;
		/*
		 * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
		 */
		err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
		if (err)
			return err;
		/* Page may still be null, but only if nonblock was set. */
		if (page) {
			mark_page_accessed(page);
			err = install_page(mm, vma, addr, page, prot);
			if (err) {
				page_cache_release(page);
				return err;
			}
		} else {
			/* No page was found just because we can't read it in
			 * now (being here implies nonblock != 0), but the page
			 * may exist, so set the PTE to fault it in later. */
			err = install_file_pte(mm, vma, addr, pgoff, prot);
			if (err)
				return err;
		}

		len -= PAGE_SIZE;
		addr += PAGE_SIZE;
		pgoff++;
	}
	return 0;
}

#ifdef CONFIG_NUMA
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

struct mempolicy *
shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *i = vma->vm_file->f_dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
	}
	retval = 0;
out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
static struct inode *
shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return NULL;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks = 0;
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		INIT_LIST_HEAD(&info->swaplist);

		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy);
			break;
		case S_IFDIR:
			inode->i_nlink++;
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy);
			break;
		}
	} else if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
	return inode;
}

#ifdef CONFIG_TMPFS
static struct inode_operations shmem_symlink_inode_operations;
static struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs makes no use of shmem_prepare_write, but it
 * lets a tmpfs file be used read-write below the loop driver.
 */
static int
shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
}
static ssize_t
shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_dentry->d_inode;
	loff_t pos;
	unsigned long written;
	ssize_t err;

	if ((ssize_t) count < 0)
		return -EINVAL;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	down(&inode->i_sem);

	pos = *ppos;
	written = 0;

	err = generic_write_checks(file, &pos, &count, 0);
	if (err || !count)
		goto out;

	err = remove_suid(file->f_dentry);
	if (err)
		goto out;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;

	do {
		struct page *page = NULL;
		unsigned long bytes, index, offset;
		char *kaddr;
		int left;

		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/*
		 * We don't hold page lock across copy from user -
		 * what would it guard against? - so no deadlock here.
		 * But it still may be a good idea to prefault below.
		 */

		err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
		if (err)
			break;

		left = bytes;
		if (PageHighMem(page)) {
			volatile unsigned char dummy;
			__get_user(dummy, buf);
			__get_user(dummy, buf + bytes - 1);

			kaddr = kmap_atomic(page, KM_USER0);
			left = __copy_from_user_inatomic(kaddr + offset,
							buf, bytes);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (left) {
			kaddr = kmap(page);
			left = __copy_from_user(kaddr + offset, buf, bytes);
			kunmap(page);
		}

		written += bytes;
		count -= bytes;
		pos += bytes;
		buf += bytes;
		if (pos > inode->i_size)
			i_size_write(inode, pos);

		flush_dcache_page(page);
		set_page_dirty(page);
		mark_page_accessed(page);
		page_cache_release(page);

		if (left) {
			pos -= left;
			written -= left;
			err = -EFAULT;
			break;
		}

		/*
		 * Our dirty pages are not counted in nr_dirty,
		 * and we do not attempt to balance dirty pages.
		 */

		cond_resched();
	} while (count);

	*ppos = pos;
	if (written)
		err = written;
out:
	up(&inode->i_sem);
	return err;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_sem protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else
			page = ZERO_PAGE(0);

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	read_descriptor_t desc;

	if ((ssize_t) count < 0)
		return -EINVAL;
	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.buf = buf;
	desc.error = 0;

	do_shmem_file_read(filp, ppos, &desc, file_read_actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_shmem_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
	int error = -ENOSPC;

	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
			error = 0;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	dir->i_nlink++;
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink++;
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
	return 0;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_inodes) {
			spin_lock(&sbinfo->stat_lock);
			sbinfo->free_inodes++;
			spin_unlock(&sbinfo->stat_lock);
		}
	}

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inode->i_nlink--;
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	dir->i_nlink--;
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			old_dir->i_nlink--;
	} else if (they_are_dirs) {
		old_dir->i_nlink--;
		new_dir->i_nlink++;
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}
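/*
 * E.g. a short target like "conf" (5 bytes with its NUL) fits in the
 * shmem_inode_info fields that precede the embedded vfs_inode, so it
 * is stored inline and no data page is allocated; a longer target is
 * written into page 0 of the symlink inode instead, and the two cases
 * get different inode_operations below.
 */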
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

static struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};

static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
{
	char *this_char, *value, *rest;

	while ((this_char = strsep(&options, ",")) != NULL) {
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			*blocks = size >> PAGE_CACHE_SHIFT;
		} else if (!strcmp(this_char,"nr_blocks")) {
			*blocks = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			*inodes = memparse(value,&rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (!mode)
				continue;
			*mode = simple_strtoul(value,&rest,8);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (!uid)
				continue;
			*uid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (!gid)
				continue;
			*gid = simple_strtoul(value,&rest,0);
			if (*rest)
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}
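/*
 * The options parsed above are what e.g.
 *	mount -t tmpfs -o size=50%,nr_inodes=8k,mode=1777 tmpfs /dev/shm
 * passes down: "size" accepts k/m/g suffixes or a percentage of
 * physical RAM, and is converted to a page-sized block count for the
 * superblock limits.
 */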
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	unsigned long max_blocks = sbinfo->max_blocks;
	unsigned long max_inodes = sbinfo->max_inodes;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, NULL, NULL, NULL,
				&max_blocks, &max_inodes))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (max_blocks < blocks)
		goto out;
	if (max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
	if (max_blocks && !sbinfo->max_blocks)
		goto out;
	if (max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = max_blocks;
	sbinfo->free_blocks = max_blocks - blocks;
	sbinfo->max_inodes  = max_inodes;
	sbinfo->free_inodes = max_inodes - inodes;
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}
#endif

static void shmem_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

static int shmem_fill_super(struct super_block *sb,
			    void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	int mode = S_IRWXUGO | S_ISVTX;
	uid_t uid = current->fsuid;
	gid_t gid = current->fsgid;
	int err = -ENOMEM;
	struct shmem_sb_info *sbinfo;
	unsigned long blocks = 0;
	unsigned long inodes = 0;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		blocks = totalram_pages / 2;
		inodes = totalram_pages - totalhigh_pages;
		if (inodes > blocks)
			inodes = blocks;
		if (shmem_parse_options(data, &mode, &uid, &gid,
					&blocks, &inodes))
			return -EINVAL;
	}
#else
	sb->s_flags |= MS_NOUSER;
#endif

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_blocks = blocks;
	sbinfo->free_blocks = blocks;
	sbinfo->max_inodes = inodes;
	sbinfo->free_inodes = inodes;

	sb->s_fs_info = sbinfo;
	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
		goto failed;
	inode->i_uid = uid;
	inode->i_gid = gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}

static kmem_cache_t *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}

static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		inode_init_once(&p->vfs_inode);
	}
}

static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, 0, init_once, NULL);
	if (shmem_inode_cachep == NULL)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	if (kmem_cache_destroy(shmem_inode_cachep))
		printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
}

static struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
#ifdef CONFIG_TMPFS
	.prepare_write	= shmem_prepare_write,
	.commit_write	= simple_commit_write,
#endif
};

static struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= shmem_file_read,
	.write		= shmem_file_write,
	.fsync		= simple_sync_file,
	.sendfile	= shmem_file_sendfile,
#endif
};

static struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
};

static struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
};

static struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};

static struct vm_operations_struct shmem_vm_ops = {
	.nopage		= shmem_nopage,
	.populate	= shmem_populate,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
static struct vfsmount *shm_mnt;

static int __init init_tmpfs(void)
{
	int error;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}
#ifdef CONFIG_TMPFS
	devfs_mk_dir("shm");
#endif
	shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	shm_mnt = ERR_PTR(error);
	return error;
}
module_init(init_tmpfs)

/*
 * shmem_file_setup - get an unlinked file living in tmpfs
 *
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 *
 */
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct dentry *dentry, *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	dentry = d_alloc(root, &this);
	if (!dentry)
		goto put_memory;

	error = -ENFILE;
	file = get_empty_filp();
	if (!file)
		goto put_dentry;

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto close_file;

	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
	d_instantiate(dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
	file->f_vfsmnt = mntget(shm_mnt);
	file->f_dentry = dentry;
	file->f_mapping = inode->i_mapping;
	file->f_op = &shmem_file_operations;
	file->f_mode = FMODE_WRITE | FMODE_READ;
	return file;

close_file:
	put_filp(file);
put_dentry:
	dput(dentry);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}

/*
 * shmem_zero_setup - setup a shared anonymous mapping
 *
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
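/*
 * Usage sketch (illustrative): besides shmem_zero_setup() above, the
 * SysV shared memory code obtains its backing object the same way,
 * roughly
 *
 *	file = shmem_file_setup("SYSV00000000", size, VM_ACCOUNT);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * and then maps the unlinked tmpfs file into each attaching process.
 */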