ashmem.c revision 1d3f8f2da1c28709a3c494f3872b89c871906b2d
1/* mm/ashmem.c 2** 3** Anonymous Shared Memory Subsystem, ashmem 4** 5** Copyright (C) 2008 Google, Inc. 6** 7** Robert Love <rlove@google.com> 8** 9** This software is licensed under the terms of the GNU General Public 10** License version 2, as published by the Free Software Foundation, and 11** may be copied, distributed, and modified under those terms. 12** 13** This program is distributed in the hope that it will be useful, 14** but WITHOUT ANY WARRANTY; without even the implied warranty of 15** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16** GNU General Public License for more details. 17*/ 18 19#include <linux/module.h> 20#include <linux/file.h> 21#include <linux/fs.h> 22#include <linux/miscdevice.h> 23#include <linux/security.h> 24#include <linux/mm.h> 25#include <linux/mman.h> 26#include <linux/uaccess.h> 27#include <linux/personality.h> 28#include <linux/bitops.h> 29#include <linux/mutex.h> 30#include <linux/shmem_fs.h> 31#include "ashmem.h" 32 33#define ASHMEM_NAME_PREFIX "dev/ashmem/" 34#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) 35#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) 36 37/* 38 * ashmem_area - anonymous shared memory area 39 * Lifecycle: From our parent file's open() until its release() 40 * Locking: Protected by `ashmem_mutex' 41 * Big Note: Mappings do NOT pin this structure; it dies on close() 42 */ 43struct ashmem_area { 44 char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */ 45 struct list_head unpinned_list; /* list of all ashmem areas */ 46 struct file *file; /* the shmem-based backing file */ 47 size_t size; /* size of the mapping, in bytes */ 48 unsigned long prot_mask; /* allowed prot bits, as vm_flags */ 49}; 50 51/* 52 * ashmem_range - represents an interval of unpinned (evictable) pages 53 * Lifecycle: From unpin to pin 54 * Locking: Protected by `ashmem_mutex' 55 */ 56struct ashmem_range { 57 struct list_head lru; /* entry in LRU list */ 58 struct list_head unpinned; /* entry in its area's unpinned list */ 59 struct ashmem_area *asma; /* associated area */ 60 size_t pgstart; /* starting page, inclusive */ 61 size_t pgend; /* ending page, inclusive */ 62 unsigned int purged; /* ASHMEM_NOT or ASHMEM_WAS_PURGED */ 63}; 64 65/* LRU list of unpinned pages, protected by ashmem_mutex */ 66static LIST_HEAD(ashmem_lru_list); 67 68/* Count of pages on our LRU list, protected by ashmem_mutex */ 69static unsigned long lru_count; 70 71/* 72 * ashmem_mutex - protects the list of and each individual ashmem_area 73 * 74 * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem 75 */ 76static DEFINE_MUTEX(ashmem_mutex); 77 78static struct kmem_cache *ashmem_area_cachep __read_mostly; 79static struct kmem_cache *ashmem_range_cachep __read_mostly; 80 81#define range_size(range) \ 82 ((range)->pgend - (range)->pgstart + 1) 83 84#define range_on_lru(range) \ 85 ((range)->purged == ASHMEM_NOT_PURGED) 86 87#define page_range_subsumes_range(range, start, end) \ 88 (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) 89 90#define page_range_subsumed_by_range(range, start, end) \ 91 (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) 92 93#define page_in_range(range, page) \ 94 (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) 95 96#define page_range_in_range(range, start, end) \ 97 (page_in_range(range, start) || page_in_range(range, end) || \ 98 page_range_subsumes_range(range, start, end)) 99 100#define range_before_page(range, page) \ 101 ((range)->pgend < (page)) 102 103#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) 104 105static inline void lru_add(struct ashmem_range *range) 106{ 107 list_add_tail(&range->lru, &ashmem_lru_list); 108 lru_count += range_size(range); 109} 110 111static inline void lru_del(struct ashmem_range *range) 112{ 113 list_del(&range->lru); 114 lru_count -= range_size(range); 115} 116 117/* 118 * range_alloc - allocate and initialize a new ashmem_range structure 119 * 120 * 'asma' - associated ashmem_area 121 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list 122 * 'purged' - initial purge value (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) 123 * 'start' - starting page, inclusive 124 * 'end' - ending page, inclusive 125 * 126 * Caller must hold ashmem_mutex. 127 */ 128static int range_alloc(struct ashmem_area *asma, 129 struct ashmem_range *prev_range, unsigned int purged, 130 size_t start, size_t end) 131{ 132 struct ashmem_range *range; 133 134 range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); 135 if (unlikely(!range)) 136 return -ENOMEM; 137 138 range->asma = asma; 139 range->pgstart = start; 140 range->pgend = end; 141 range->purged = purged; 142 143 list_add_tail(&range->unpinned, &prev_range->unpinned); 144 145 if (range_on_lru(range)) 146 lru_add(range); 147 148 return 0; 149} 150 151static void range_del(struct ashmem_range *range) 152{ 153 list_del(&range->unpinned); 154 if (range_on_lru(range)) 155 lru_del(range); 156 kmem_cache_free(ashmem_range_cachep, range); 157} 158 159/* 160 * range_shrink - shrinks a range 161 * 162 * Caller must hold ashmem_mutex. 163 */ 164static inline void range_shrink(struct ashmem_range *range, 165 size_t start, size_t end) 166{ 167 size_t pre = range_size(range); 168 169 range->pgstart = start; 170 range->pgend = end; 171 172 if (range_on_lru(range)) 173 lru_count -= pre - range_size(range); 174} 175 176static int ashmem_open(struct inode *inode, struct file *file) 177{ 178 struct ashmem_area *asma; 179 int ret; 180 181 ret = nonseekable_open(inode, file); 182 if (unlikely(ret)) 183 return ret; 184 185 asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL); 186 if (unlikely(!asma)) 187 return -ENOMEM; 188 189 INIT_LIST_HEAD(&asma->unpinned_list); 190 memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); 191 asma->prot_mask = PROT_MASK; 192 file->private_data = asma; 193 194 return 0; 195} 196 197static int ashmem_release(struct inode *ignored, struct file *file) 198{ 199 struct ashmem_area *asma = file->private_data; 200 struct ashmem_range *range, *next; 201 202 mutex_lock(&ashmem_mutex); 203 list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) 204 range_del(range); 205 mutex_unlock(&ashmem_mutex); 206 207 if (asma->file) 208 fput(asma->file); 209 kmem_cache_free(ashmem_area_cachep, asma); 210 211 return 0; 212} 213 214static ssize_t ashmem_read(struct file *file, char __user *buf, 215 size_t len, loff_t *pos) 216{ 217 struct ashmem_area *asma = file->private_data; 218 int ret = 0; 219 220 mutex_lock(&ashmem_mutex); 221 222 /* If size is not set, or set to 0, always return EOF. */ 223 if (asma->size == 0) { 224 goto out; 225 } 226 227 if (!asma->file) { 228 ret = -EBADF; 229 goto out; 230 } 231 232 ret = asma->file->f_op->read(asma->file, buf, len, pos); 233 234out: 235 mutex_unlock(&ashmem_mutex); 236 return ret; 237} 238 239static inline unsigned long 240calc_vm_may_flags(unsigned long prot) 241{ 242 return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD ) | 243 _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | 244 _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); 245} 246 247static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) 248{ 249 struct ashmem_area *asma = file->private_data; 250 int ret = 0; 251 252 mutex_lock(&ashmem_mutex); 253 254 /* user needs to SET_SIZE before mapping */ 255 if (unlikely(!asma->size)) { 256 ret = -EINVAL; 257 goto out; 258 } 259 260 /* requested protection bits must match our allowed protection mask */ 261 if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & 262 calc_vm_prot_bits(PROT_MASK))) { 263 ret = -EPERM; 264 goto out; 265 } 266 vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); 267 268 if (!asma->file) { 269 char *name = ASHMEM_NAME_DEF; 270 struct file *vmfile; 271 272 if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') 273 name = asma->name; 274 275 /* ... and allocate the backing shmem file */ 276 vmfile = shmem_file_setup(name, asma->size, vma->vm_flags); 277 if (unlikely(IS_ERR(vmfile))) { 278 ret = PTR_ERR(vmfile); 279 goto out; 280 } 281 asma->file = vmfile; 282 } 283 get_file(asma->file); 284 285 /* 286 * XXX - Reworked to use shmem_zero_setup() instead of 287 * shmem_set_file while we're in staging. -jstultz 288 */ 289 if (vma->vm_flags & VM_SHARED) { 290 ret = shmem_zero_setup(vma); 291 if (ret) { 292 fput(asma->file); 293 goto out; 294 } 295 } 296 297 if (vma->vm_file) 298 fput(vma->vm_file); 299 vma->vm_file = asma->file; 300 vma->vm_flags |= VM_CAN_NONLINEAR; 301 302out: 303 mutex_unlock(&ashmem_mutex); 304 return ret; 305} 306 307/* 308 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab 309 * 310 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how 311 * many objects (pages) we have in total. 312 * 313 * 'gfp_mask' is the mask of the allocation that got us into this mess. 314 * 315 * Return value is the number of objects (pages) remaining, or -1 if we cannot 316 * proceed without risk of deadlock (due to gfp_mask). 317 * 318 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial 319 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' 320 * pages freed. 321 */ 322static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) 323{ 324 struct ashmem_range *range, *next; 325 326 /* We might recurse into filesystem code, so bail out if necessary */ 327 if (nr_to_scan && !(gfp_mask & __GFP_FS)) 328 return -1; 329 if (!nr_to_scan) 330 return lru_count; 331 332 mutex_lock(&ashmem_mutex); 333 list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { 334 struct inode *inode = range->asma->file->f_dentry->d_inode; 335 loff_t start = range->pgstart * PAGE_SIZE; 336 loff_t end = (range->pgend + 1) * PAGE_SIZE - 1; 337 338 vmtruncate_range(inode, start, end); 339 range->purged = ASHMEM_WAS_PURGED; 340 lru_del(range); 341 342 nr_to_scan -= range_size(range); 343 if (nr_to_scan <= 0) 344 break; 345 } 346 mutex_unlock(&ashmem_mutex); 347 348 return lru_count; 349} 350 351static struct shrinker ashmem_shrinker = { 352 .shrink = ashmem_shrink, 353 .seeks = DEFAULT_SEEKS * 4, 354}; 355 356static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) 357{ 358 int ret = 0; 359 360 mutex_lock(&ashmem_mutex); 361 362 /* the user can only remove, not add, protection bits */ 363 if (unlikely((asma->prot_mask & prot) != prot)) { 364 ret = -EINVAL; 365 goto out; 366 } 367 368 /* does the application expect PROT_READ to imply PROT_EXEC? */ 369 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) 370 prot |= PROT_EXEC; 371 372 asma->prot_mask = prot; 373 374out: 375 mutex_unlock(&ashmem_mutex); 376 return ret; 377} 378 379static int set_name(struct ashmem_area *asma, void __user *name) 380{ 381 int ret = 0; 382 383 mutex_lock(&ashmem_mutex); 384 385 /* cannot change an existing mapping's name */ 386 if (unlikely(asma->file)) { 387 ret = -EINVAL; 388 goto out; 389 } 390 391 if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN, 392 name, ASHMEM_NAME_LEN))) 393 ret = -EFAULT; 394 asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; 395 396out: 397 mutex_unlock(&ashmem_mutex); 398 399 return ret; 400} 401 402static int get_name(struct ashmem_area *asma, void __user *name) 403{ 404 int ret = 0; 405 406 mutex_lock(&ashmem_mutex); 407 if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { 408 size_t len; 409 410 /* 411 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes 412 * prevents us from revealing one user's stack to another. 413 */ 414 len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; 415 if (unlikely(copy_to_user(name, 416 asma->name + ASHMEM_NAME_PREFIX_LEN, len))) 417 ret = -EFAULT; 418 } else { 419 if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF, 420 sizeof(ASHMEM_NAME_DEF)))) 421 ret = -EFAULT; 422 } 423 mutex_unlock(&ashmem_mutex); 424 425 return ret; 426} 427 428/* 429 * ashmem_pin - pin the given ashmem region, returning whether it was 430 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). 431 * 432 * Caller must hold ashmem_mutex. 433 */ 434static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) 435{ 436 struct ashmem_range *range, *next; 437 int ret = ASHMEM_NOT_PURGED; 438 439 list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { 440 /* moved past last applicable page; we can short circuit */ 441 if (range_before_page(range, pgstart)) 442 break; 443 444 /* 445 * The user can ask us to pin pages that span multiple ranges, 446 * or to pin pages that aren't even unpinned, so this is messy. 447 * 448 * Four cases: 449 * 1. The requested range subsumes an existing range, so we 450 * just remove the entire matching range. 451 * 2. The requested range overlaps the start of an existing 452 * range, so we just update that range. 453 * 3. The requested range overlaps the end of an existing 454 * range, so we just update that range. 455 * 4. The requested range punches a hole in an existing range, 456 * so we have to update one side of the range and then 457 * create a new range for the other side. 458 */ 459 if (page_range_in_range(range, pgstart, pgend)) { 460 ret |= range->purged; 461 462 /* Case #1: Easy. Just nuke the whole thing. */ 463 if (page_range_subsumes_range(range, pgstart, pgend)) { 464 range_del(range); 465 continue; 466 } 467 468 /* Case #2: We overlap from the start, so adjust it */ 469 if (range->pgstart >= pgstart) { 470 range_shrink(range, pgend + 1, range->pgend); 471 continue; 472 } 473 474 /* Case #3: We overlap from the rear, so adjust it */ 475 if (range->pgend <= pgend) { 476 range_shrink(range, range->pgstart, pgstart-1); 477 continue; 478 } 479 480 /* 481 * Case #4: We eat a chunk out of the middle. A bit 482 * more complicated, we allocate a new range for the 483 * second half and adjust the first chunk's endpoint. 484 */ 485 range_alloc(asma, range, range->purged, 486 pgend + 1, range->pgend); 487 range_shrink(range, range->pgstart, pgstart - 1); 488 break; 489 } 490 } 491 492 return ret; 493} 494 495/* 496 * ashmem_unpin - unpin the given range of pages. Returns zero on success. 497 * 498 * Caller must hold ashmem_mutex. 499 */ 500static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) 501{ 502 struct ashmem_range *range, *next; 503 unsigned int purged = ASHMEM_NOT_PURGED; 504 505restart: 506 list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { 507 /* short circuit: this is our insertion point */ 508 if (range_before_page(range, pgstart)) 509 break; 510 511 /* 512 * The user can ask us to unpin pages that are already entirely 513 * or partially pinned. We handle those two cases here. 514 */ 515 if (page_range_subsumed_by_range(range, pgstart, pgend)) 516 return 0; 517 if (page_range_in_range(range, pgstart, pgend)) { 518 pgstart = min_t(size_t, range->pgstart, pgstart), 519 pgend = max_t(size_t, range->pgend, pgend); 520 purged |= range->purged; 521 range_del(range); 522 goto restart; 523 } 524 } 525 526 return range_alloc(asma, range, purged, pgstart, pgend); 527} 528 529/* 530 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the 531 * given interval are unpinned and ASHMEM_IS_PINNED otherwise. 532 * 533 * Caller must hold ashmem_mutex. 534 */ 535static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, 536 size_t pgend) 537{ 538 struct ashmem_range *range; 539 int ret = ASHMEM_IS_PINNED; 540 541 list_for_each_entry(range, &asma->unpinned_list, unpinned) { 542 if (range_before_page(range, pgstart)) 543 break; 544 if (page_range_in_range(range, pgstart, pgend)) { 545 ret = ASHMEM_IS_UNPINNED; 546 break; 547 } 548 } 549 550 return ret; 551} 552 553static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, 554 void __user *p) 555{ 556 struct ashmem_pin pin; 557 size_t pgstart, pgend; 558 int ret = -EINVAL; 559 560 if (unlikely(!asma->file)) 561 return -EINVAL; 562 563 if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) 564 return -EFAULT; 565 566 /* per custom, you can pass zero for len to mean "everything onward" */ 567 if (!pin.len) 568 pin.len = PAGE_ALIGN(asma->size) - pin.offset; 569 570 if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) 571 return -EINVAL; 572 573 if (unlikely(((__u32) -1) - pin.offset < pin.len)) 574 return -EINVAL; 575 576 if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) 577 return -EINVAL; 578 579 pgstart = pin.offset / PAGE_SIZE; 580 pgend = pgstart + (pin.len / PAGE_SIZE) - 1; 581 582 mutex_lock(&ashmem_mutex); 583 584 switch (cmd) { 585 case ASHMEM_PIN: 586 ret = ashmem_pin(asma, pgstart, pgend); 587 break; 588 case ASHMEM_UNPIN: 589 ret = ashmem_unpin(asma, pgstart, pgend); 590 break; 591 case ASHMEM_GET_PIN_STATUS: 592 ret = ashmem_get_pin_status(asma, pgstart, pgend); 593 break; 594 } 595 596 mutex_unlock(&ashmem_mutex); 597 598 return ret; 599} 600 601static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 602{ 603 struct ashmem_area *asma = file->private_data; 604 long ret = -ENOTTY; 605 606 switch (cmd) { 607 case ASHMEM_SET_NAME: 608 ret = set_name(asma, (void __user *) arg); 609 break; 610 case ASHMEM_GET_NAME: 611 ret = get_name(asma, (void __user *) arg); 612 break; 613 case ASHMEM_SET_SIZE: 614 ret = -EINVAL; 615 if (!asma->file) { 616 ret = 0; 617 asma->size = (size_t) arg; 618 } 619 break; 620 case ASHMEM_GET_SIZE: 621 ret = asma->size; 622 break; 623 case ASHMEM_SET_PROT_MASK: 624 ret = set_prot_mask(asma, arg); 625 break; 626 case ASHMEM_GET_PROT_MASK: 627 ret = asma->prot_mask; 628 break; 629 case ASHMEM_PIN: 630 case ASHMEM_UNPIN: 631 case ASHMEM_GET_PIN_STATUS: 632 ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg); 633 break; 634 case ASHMEM_PURGE_ALL_CACHES: 635 ret = -EPERM; 636 if (capable(CAP_SYS_ADMIN)) { 637 ret = ashmem_shrink(&ashmem_shrinker, 0, GFP_KERNEL); 638 ashmem_shrink(&ashmem_shrinker, ret, GFP_KERNEL); 639 } 640 break; 641 } 642 643 return ret; 644} 645 646static struct file_operations ashmem_fops = { 647 .owner = THIS_MODULE, 648 .open = ashmem_open, 649 .release = ashmem_release, 650 .read = ashmem_read, 651 .mmap = ashmem_mmap, 652 .unlocked_ioctl = ashmem_ioctl, 653 .compat_ioctl = ashmem_ioctl, 654}; 655 656static struct miscdevice ashmem_misc = { 657 .minor = MISC_DYNAMIC_MINOR, 658 .name = "ashmem", 659 .fops = &ashmem_fops, 660}; 661 662static int __init ashmem_init(void) 663{ 664 int ret; 665 666 ashmem_area_cachep = kmem_cache_create("ashmem_area_cache", 667 sizeof(struct ashmem_area), 668 0, 0, NULL); 669 if (unlikely(!ashmem_area_cachep)) { 670 printk(KERN_ERR "ashmem: failed to create slab cache\n"); 671 return -ENOMEM; 672 } 673 674 ashmem_range_cachep = kmem_cache_create("ashmem_range_cache", 675 sizeof(struct ashmem_range), 676 0, 0, NULL); 677 if (unlikely(!ashmem_range_cachep)) { 678 printk(KERN_ERR "ashmem: failed to create slab cache\n"); 679 return -ENOMEM; 680 } 681 682 ret = misc_register(&ashmem_misc); 683 if (unlikely(ret)) { 684 printk(KERN_ERR "ashmem: failed to register misc device!\n"); 685 return ret; 686 } 687 688 register_shrinker(&ashmem_shrinker); 689 690 printk(KERN_INFO "ashmem: initialized\n"); 691 692 return 0; 693} 694 695static void __exit ashmem_exit(void) 696{ 697 int ret; 698 699 unregister_shrinker(&ashmem_shrinker); 700 701 ret = misc_deregister(&ashmem_misc); 702 if (unlikely(ret)) 703 printk(KERN_ERR "ashmem: failed to unregister misc device!\n"); 704 705 kmem_cache_destroy(ashmem_range_cachep); 706 kmem_cache_destroy(ashmem_area_cachep); 707 708 printk(KERN_INFO "ashmem: unloaded\n"); 709} 710 711module_init(ashmem_init); 712module_exit(ashmem_exit); 713 714MODULE_LICENSE("GPL"); 715