mmap.c revision b845f313d78e4e259ec449909e3bbadf77b53a6d
/*
 * mm/mmap.c
 *
 * Written by obz.
 *
 * Address space accounting code	<alan@redhat.com>
 */

#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif

#ifndef arch_rebalance_pgtables
#define arch_rebalance_pgtables(addr, len)	(addr)
#endif

static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end);

/*
 * WARNING: the debugging will use recursive algorithms so never enable this
 * unless you know what you are doing.
 */
#undef DEBUG_MM_RB

/* description of effects of mapping type and prot in current implementation.
 * this is due to the limited x86 page protection hardware.  The expected
 * behavior is in parens:
 *
 * map_type	prot
 *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
 * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
 *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
 *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
 *
 */
pgprot_t protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return __pgprot(pgprot_val(protection_map[vm_flags &
				(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
			pgprot_val(arch_vm_get_page_prot(vm_flags)));
}
EXPORT_SYMBOL(vm_get_page_prot);
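/*
 * Illustrative example (editor's note, not part of the original file):
 * protection_map is indexed by the low four vm_flags bits, so e.g.
 *
 *	pgprot_t prot = vm_get_page_prot(VM_READ | VM_WRITE);
 *
 * selects entry 3 (__P011: private read/write, copy-on-write), while
 * adding VM_SHARED would select entry 11 (__S011: genuinely writable),
 * exactly as described by the table above.
 */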
int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
int sysctl_overcommit_ratio = 50;	/* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = global_page_state(NR_FILE_PAGES);
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (n <= totalreserve_pages)
			goto error;
		else
			n -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = (totalram_pages - hugetlb_total_pages())
		* sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	allowed -= mm->total_vm / 32;

	/*
	 * cast `allowed' as a signed long because vm_committed_space
	 * sometimes has a negative value
	 */
	if (atomic_long_read(&vm_committed_space) < (long)allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/*
 * Requires inode->i_mapping->i_mmap_lock
 */
static void __remove_shared_vm_struct(struct vm_area_struct *vma,
		struct file *file, struct address_space *mapping)
{
	if (vma->vm_flags & VM_DENYWRITE)
		atomic_inc(&file->f_path.dentry->d_inode->i_writecount);
	if (vma->vm_flags & VM_SHARED)
		mapping->i_mmap_writable--;

	flush_dcache_mmap_lock(mapping);
	if (unlikely(vma->vm_flags & VM_NONLINEAR))
		list_del_init(&vma->shared.vm_set.list);
	else
		vma_prio_tree_remove(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
}

/*
 * Unlink a file-based vm structure from its prio_tree, to hide
 * vma from rmap and vmtruncate before freeing its page tables.
 */
void unlink_file_vma(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;

	if (file) {
		struct address_space *mapping = file->f_mapping;
		spin_lock(&mapping->i_mmap_lock);
		__remove_shared_vm_struct(vma, file, mapping);
		spin_unlock(&mapping->i_mmap_lock);
	}
}
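/*
 * Worked example for __vm_enough_memory() (editor's illustration with
 * made-up numbers): under OVERCOMMIT_NEVER on a machine with 100000
 * non-hugetlb RAM pages, 25000 swap pages and the default 50% ratio,
 * a non-root process sees
 *
 *	allowed  = 100000 * 50 / 100	=  50000
 *	allowed -= allowed / 32		-> 48438   (3% kept for root)
 *	allowed += total_swap_pages	-> 73438
 *	allowed -= mm->total_vm / 32	   (headroom for other processes)
 *
 * and the request fails once vm_committed_space would reach that figure.
 */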
/*
 * Close a vm structure and free it, returning the next.
 */
static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *next = vma->vm_next;

	might_sleep();
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file) {
		fput(vma->vm_file);
		if (vma->vm_flags & VM_EXECUTABLE)
			removed_exe_file_vma(vma->vm_mm);
	}
	mpol_put(vma_policy(vma));
	kmem_cache_free(vm_area_cachep, vma);
	return next;
}

asmlinkage unsigned long sys_brk(unsigned long brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	min_brk = mm->end_code;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the
	 * test of oldbrk with newbrk then it can escape the test and let the
	 * data segment grow beyond its set limit in the case where the limit
	 * is not page aligned -Ram Gupta
	 */
	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
			(mm->end_data - mm->start_data) > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef DEBUG_MM_RB
static int browse_rb(struct rb_root *root)
{
	int i = 0, j;
	struct rb_node *nd, *pn = NULL;
	unsigned long prev = 0, pend = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct vm_area_struct *vma;
		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
		if (vma->vm_start < prev)
			printk("vm_start %lx prev %lx\n", vma->vm_start, prev), i = -1;
		if (vma->vm_start < pend)
			printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
		if (vma->vm_start > vma->vm_end)
			printk("vm_end %lx < vm_start %lx\n", vma->vm_end, vma->vm_start);
		i++;
		pn = nd;
		prev = vma->vm_start;
		pend = vma->vm_end;
	}
	j = 0;
	for (nd = pn; nd; nd = rb_prev(nd)) {
		j++;
	}
	if (i != j)
		printk("backwards %d, forwards %d\n", j, i), i = 0;
	return i;
}

void validate_mm(struct mm_struct *mm)
{
	int bug = 0;
	int i = 0;
	struct vm_area_struct *tmp = mm->mmap;
	while (tmp) {
		tmp = tmp->vm_next;
		i++;
	}
	if (i != mm->map_count)
		printk("map_count %d vm_next %d\n", mm->map_count, i), bug = 1;
	i = browse_rb(&mm->mm_rb);
	if (i != mm->map_count)
		printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
	BUG_ON(bug);
}
#else
#define validate_mm(mm) do { } while (0)
#endif
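/*
 * Example (editor's illustration): sys_brk() above is how a C library
 * allocator grows the heap.  Conceptually, from userspace:
 *
 *	void *old = sbrk(0);		// read the current break
 *	brk((char *)old + 4096);	// request one more page
 *
 * Growth is serviced by do_brk() below, shrinking by do_munmap(), and
 * the syscall always returns the resulting mm->brk, which is unchanged
 * on failure.
 */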
static struct vm_area_struct *
find_vma_prepare(struct mm_struct *mm, unsigned long addr,
		struct vm_area_struct **pprev, struct rb_node ***rb_link,
		struct rb_node **rb_parent)
{
	struct vm_area_struct *vma;
	struct rb_node **__rb_link, *__rb_parent, *rb_prev;

	__rb_link = &mm->mm_rb.rb_node;
	rb_prev = __rb_parent = NULL;
	vma = NULL;

	while (*__rb_link) {
		struct vm_area_struct *vma_tmp;

		__rb_parent = *__rb_link;
		vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);

		if (vma_tmp->vm_end > addr) {
			vma = vma_tmp;
			if (vma_tmp->vm_start <= addr)
				return vma;
			__rb_link = &__rb_parent->rb_left;
		} else {
			rb_prev = __rb_parent;
			__rb_link = &__rb_parent->rb_right;
		}
	}

	*pprev = NULL;
	if (rb_prev)
		*pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
	*rb_link = __rb_link;
	*rb_parent = __rb_parent;
	return vma;
}

static inline void
__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	if (prev) {
		vma->vm_next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			vma->vm_next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			vma->vm_next = NULL;
	}
}

void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
		struct rb_node **rb_link, struct rb_node *rb_parent)
{
	rb_link_node(&vma->vm_rb, rb_parent, rb_link);
	rb_insert_color(&vma->vm_rb, &mm->mm_rb);
}

static inline void __vma_link_file(struct vm_area_struct *vma)
{
	struct file *file;

	file = vma->vm_file;
	if (file) {
		struct address_space *mapping = file->f_mapping;

		if (vma->vm_flags & VM_DENYWRITE)
			atomic_dec(&file->f_path.dentry->d_inode->i_writecount);
		if (vma->vm_flags & VM_SHARED)
			mapping->i_mmap_writable++;

		flush_dcache_mmap_lock(mapping);
		if (unlikely(vma->vm_flags & VM_NONLINEAR))
			vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
		else
			vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}
}

static void
__vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, struct rb_node **rb_link,
	struct rb_node *rb_parent)
{
	__vma_link_list(mm, vma, prev, rb_parent);
	__vma_link_rb(mm, vma, rb_link, rb_parent);
	__anon_vma_link(vma);
}

static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
			struct vm_area_struct *prev, struct rb_node **rb_link,
			struct rb_node *rb_parent)
{
	struct address_space *mapping = NULL;

	if (vma->vm_file)
		mapping = vma->vm_file->f_mapping;

	if (mapping) {
		spin_lock(&mapping->i_mmap_lock);
		vma->vm_truncate_count = mapping->truncate_count;
	}
	anon_vma_lock(vma);

	__vma_link(mm, vma, prev, rb_link, rb_parent);
	__vma_link_file(vma);

	anon_vma_unlock(vma);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);

	mm->map_count++;
	validate_mm(mm);
}
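/*
 * Editor's note (illustrative): every vma is indexed twice -- in the
 * address-sorted mm->mmap singly linked list for cheap sequential
 * walks, and in the mm->mm_rb rbtree for O(log n) lookup.  A typical
 * insertion therefore looks like
 *
 *	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
 *	... check that [addr, addr+len) does not overlap vma ...
 *	vma_link(mm, new_vma, prev, rb_link, rb_parent);
 *
 * so both structures are updated together under the relevant locks.
 */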
/*
 * Helper for vma_adjust in the split_vma insert case:
 * insert vm structure into list and rbtree and anon_vma,
 * but it has already been inserted into prio_tree earlier.
 */
static void
__insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *__vma, *prev;
	struct rb_node **rb_link, *rb_parent;

	__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
	BUG_ON(__vma && __vma->vm_start < vma->vm_end);
	__vma_link(mm, vma, prev, rb_link, rb_parent);
	mm->map_count++;
}

static inline void
__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	prev->vm_next = vma->vm_next;
	rb_erase(&vma->vm_rb, &mm->mm_rb);
	if (mm->mmap_cache == vma)
		mm->mmap_cache = prev;
}

/*
 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
 * is already present in an i_mmap tree without adjusting the tree.
 * The following helper function should be used when such adjustments
 * are necessary.  The "insert" vma (if any) is to be inserted
 * before we drop the necessary locks.
 */
void vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *next = vma->vm_next;
	struct vm_area_struct *importer = NULL;
	struct address_space *mapping = NULL;
	struct prio_tree_root *root = NULL;
	struct file *file = vma->vm_file;
	struct anon_vma *anon_vma = NULL;
	long adjust_next = 0;
	int remove_next = 0;

	if (next && !insert) {
		if (end >= next->vm_end) {
			/*
			 * vma expands, overlapping all the next, and
			 * perhaps the one after too (mprotect case 6).
			 */
again:			remove_next = 1 + (end > next->vm_end);
			end = next->vm_end;
			anon_vma = next->anon_vma;
			importer = vma;
		} else if (end > next->vm_start) {
			/*
			 * vma expands, overlapping part of the next:
			 * mprotect case 5 shifting the boundary up.
			 */
			adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
			anon_vma = next->anon_vma;
			importer = vma;
		} else if (end < vma->vm_end) {
			/*
			 * vma shrinks, and !insert tells it's not
			 * split_vma inserting another: so it must be
			 * mprotect case 4 shifting the boundary down.
			 */
			adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
			anon_vma = next->anon_vma;
			importer = next;
		}
	}

	if (file) {
		mapping = file->f_mapping;
		if (!(vma->vm_flags & VM_NONLINEAR))
			root = &mapping->i_mmap;
		spin_lock(&mapping->i_mmap_lock);
		if (importer &&
		    vma->vm_truncate_count != next->vm_truncate_count) {
			/*
			 * unmap_mapping_range might be in progress:
			 * ensure that the expanding vma is rescanned.
			 */
			importer->vm_truncate_count = 0;
		}
		if (insert) {
			insert->vm_truncate_count = vma->vm_truncate_count;
			/*
			 * Put into prio_tree now, so instantiated pages
			 * are visible to arm/parisc __flush_dcache_page
			 * throughout; but we cannot insert into address
			 * space until vma start or end is updated.
			 */
			__vma_link_file(insert);
		}
	}

	/*
	 * When changing only vma->vm_end, we don't really need
	 * anon_vma lock: but is that case worth optimizing out?
	 */
	if (vma->anon_vma)
		anon_vma = vma->anon_vma;
	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		/*
		 * Easily overlooked: when mprotect shifts the boundary,
		 * make sure the expanding vma has anon_vma set if the
		 * shrinking vma had, to cover any anon pages imported.
		 */
		if (importer && !importer->anon_vma) {
			importer->anon_vma = anon_vma;
			__anon_vma_link(importer);
		}
	}

	if (root) {
		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, root);
		if (adjust_next)
			vma_prio_tree_remove(next, root);
	}

	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
	if (adjust_next) {
		next->vm_start += adjust_next << PAGE_SHIFT;
		next->vm_pgoff += adjust_next;
	}

	if (root) {
		if (adjust_next)
			vma_prio_tree_insert(next, root);
		vma_prio_tree_insert(vma, root);
		flush_dcache_mmap_unlock(mapping);
	}

	if (remove_next) {
		/*
		 * vma_merge has merged next into vma, and needs
		 * us to remove next before dropping the locks.
		 */
		__vma_unlink(mm, next, vma);
		if (file)
			__remove_shared_vm_struct(next, file, mapping);
		if (next->anon_vma)
			__anon_vma_merge(vma, next);
	} else if (insert) {
		/*
		 * split_vma has split insert from vma, and needs
		 * us to insert it before dropping the locks
		 * (it may either follow vma or precede it).
		 */
		__insert_vm_struct(mm, insert);
	}

	if (anon_vma)
		spin_unlock(&anon_vma->lock);
	if (mapping)
		spin_unlock(&mapping->i_mmap_lock);

	if (remove_next) {
		if (file) {
			fput(file);
			if (next->vm_flags & VM_EXECUTABLE)
				removed_exe_file_vma(mm);
		}
		mm->map_count--;
		mpol_put(vma_policy(next));
		kmem_cache_free(vm_area_cachep, next);
		/*
		 * In mprotect's case 6 (see comments on vma_merge),
		 * we must remove another next too. It would clutter
		 * up the code too much to do both in one go.
		 */
		if (remove_next == 2) {
			next = vma->vm_next;
			goto again;
		}
	}

	validate_mm(mm);
}

/*
 * If the vma has a ->close operation then the driver probably needs to release
 * per-vma resources, so we don't attempt to merge those.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

static inline int is_mergeable_vma(struct vm_area_struct *vma,
			struct file *file, unsigned long vm_flags)
{
	if (vma->vm_flags != vm_flags)
		return 0;
	if (vma->vm_file != file)
		return 0;
	if (vma->vm_ops && vma->vm_ops->close)
		return 0;
	return 1;
}

static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
					struct anon_vma *anon_vma2)
{
	return !anon_vma1 || !anon_vma2 || (anon_vma1 == anon_vma2);
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * in front of (at a lower virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 *
 * We don't check here for the merged mmap wrapping around the end of pagecache
 * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmap's which
 * wrap, nor mmaps which cover the final page at index -1UL.
 */
static int
can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
		if (vma->vm_pgoff == vm_pgoff)
			return 1;
	}
	return 0;
}

/*
 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
 * beyond (at a higher virtual address and file offset than) the vma.
 *
 * We cannot merge two vmas if they have differently assigned (non-NULL)
 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
 */
static int
can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
{
	if (is_mergeable_vma(vma, file, vm_flags) &&
	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
		pgoff_t vm_pglen;
		vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
			return 1;
	}
	return 0;
}
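/*
 * Example (editor's illustration): the file-offset check above.
 * Suppose a vma maps file pages [4..8) at addresses [A, A + 4 pages).
 * A new mapping of file pages [8..10) placed exactly at A + 4 pages
 * can merge after it, because
 *
 *	vma->vm_pgoff + vm_pglen == 4 + 4 == 8 == vm_pgoff
 *
 * Any other address/offset combination would leave a discontinuity in
 * the file coverage, so the vmas must stay separate.
 */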
/*
 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
 * whether that can be merged with its predecessor or its successor.
 * Or both (it neatly fills a hole).
 *
 * In most cases - when called for mmap, brk or mremap - [addr,end) is
 * certain not to be mapped by the time vma_merge is called; but when
 * called for mprotect, it is certain to be already mapped (either at
 * an offset within prev, or at the start of next), and the flags of
 * this area are about to be changed to vm_flags - and the no-change
 * case has already been eliminated.
 *
 * The following mprotect cases have to be considered, where AAAA is
 * the area passed down from mprotect_fixup, never extending beyond one
 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
 *
 *     AAAA             AAAA                AAAA          AAAA
 *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
 *    cannot merge    might become    might become    might become
 *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
 *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
 *    mremap move:                                    PPPPNNNNNNNN 8
 *        AAAA
 *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
 *    might become    case 1 below    case 2 below    case 3 below
 *
 * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
 * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
 */
struct vm_area_struct *vma_merge(struct mm_struct *mm,
			struct vm_area_struct *prev, unsigned long addr,
			unsigned long end, unsigned long vm_flags,
			struct anon_vma *anon_vma, struct file *file,
			pgoff_t pgoff, struct mempolicy *policy)
{
	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
	struct vm_area_struct *area, *next;

	/*
	 * We later require that vma->vm_flags == vm_flags,
	 * so this tests vma->vm_flags & VM_SPECIAL, too.
	 */
	if (vm_flags & VM_SPECIAL)
		return NULL;

	if (prev)
		next = prev->vm_next;
	else
		next = mm->mmap;
	area = next;
	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
		next = next->vm_next;

	/*
	 * Can it merge with the predecessor?
	 */
	if (prev && prev->vm_end == addr &&
			mpol_equal(vma_policy(prev), policy) &&
			can_vma_merge_after(prev, vm_flags,
						anon_vma, file, pgoff)) {
		/*
		 * OK, it can.  Can we now merge in the successor as well?
		 */
		if (next && end == next->vm_start &&
				mpol_equal(policy, vma_policy(next)) &&
				can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen) &&
				is_mergeable_anon_vma(prev->anon_vma,
						      next->anon_vma)) {
							/* cases 1, 6 */
			vma_adjust(prev, prev->vm_start,
				next->vm_end, prev->vm_pgoff, NULL);
		} else					/* cases 2, 5, 7 */
			vma_adjust(prev, prev->vm_start,
				end, prev->vm_pgoff, NULL);
		return prev;
	}

	/*
	 * Can this new request be merged in front of next?
	 */
	if (next && end == next->vm_start &&
			mpol_equal(policy, vma_policy(next)) &&
			can_vma_merge_before(next, vm_flags,
					anon_vma, file, pgoff+pglen)) {
		if (prev && addr < prev->vm_end)	/* case 4 */
			vma_adjust(prev, prev->vm_start,
				addr, prev->vm_pgoff, NULL);
		else					/* cases 3, 8 */
			vma_adjust(area, addr, next->vm_end,
				next->vm_pgoff - pglen, NULL);
		return area;
	}

	return NULL;
}
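/*
 * Example (editor's illustration): case 1 above.  If [0x2000,0x3000)
 * is an unmapped hole between two compatible anonymous vmas
 *
 *	prev [0x1000,0x2000)   and   next [0x3000,0x4000)
 *
 * then a new mapping of exactly that hole satisfies both
 * prev->vm_end == addr and end == next->vm_start, so a single
 * vma_adjust() call grows prev to [0x1000,0x4000) and removes next:
 * the hole is neatly filled by one vma.
 */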
/*
 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
 * neighbouring vmas for a suitable anon_vma, before it goes off
 * to allocate a new anon_vma.  It checks because a repetitive
 * sequence of mprotects and faults may otherwise lead to distinct
 * anon_vmas being allocated, preventing vma merge in subsequent
 * mprotect.
 */
struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *near;
	unsigned long vm_flags;

	near = vma->vm_next;
	if (!near)
		goto try_prev;

	/*
	 * Since only mprotect tries to remerge vmas, match flags
	 * which might be mprotected into each other later on.
	 * Neither mlock nor madvise tries to remerge at present,
	 * so leave their flags as obstructing a merge.
	 */
	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);

	if (near->anon_vma && vma->vm_end == near->vm_start &&
			mpol_equal(vma_policy(vma), vma_policy(near)) &&
			can_vma_merge_before(near, vm_flags,
				NULL, vma->vm_file, vma->vm_pgoff +
				((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)))
		return near->anon_vma;
try_prev:
	/*
	 * It is potentially slow to have to call find_vma_prev here.
	 * But it's only on the first write fault on the vma, not
	 * every time, and we could devise a way to avoid it later
	 * (e.g. stash info in next's anon_vma_node when assigning
	 * an anon_vma, or when trying vma_merge).  Another time.
	 */
	BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
	if (!near)
		goto none;

	vm_flags = vma->vm_flags & ~(VM_READ|VM_WRITE|VM_EXEC);
	vm_flags |= near->vm_flags & (VM_READ|VM_WRITE|VM_EXEC);

	if (near->anon_vma && near->vm_end == vma->vm_start &&
			mpol_equal(vma_policy(near), vma_policy(vma)) &&
			can_vma_merge_after(near, vm_flags,
				NULL, vma->vm_file, vma->vm_pgoff))
		return near->anon_vma;
none:
	/*
	 * There's no absolute need to look only at touching neighbours:
	 * we could search further afield for "compatible" anon_vmas.
	 * But it would probably just be a waste of time searching,
	 * or lead to too many vmas hanging off the same anon_vma.
	 * We're trying to allow mprotect remerging later on,
	 * not trying to minimize memory used for anon_vmas.
	 */
	return NULL;
}

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *mm, unsigned long flags,
						struct file *file, long pages)
{
	const unsigned long stack_flags
		= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);

	if (file) {
		mm->shared_vm += pages;
		if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
			mm->exec_vm += pages;
	} else if (flags & stack_flags)
		mm->stack_vm += pages;
	if (flags & (VM_RESERVED|VM_IO))
		mm->reserved_vm += pages;
}
#endif /* CONFIG_PROC_FS */
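/*
 * Example (editor's illustration): a typical request that ends up in
 * do_mmap_pgoff() below.  From userspace,
 *
 *	void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * arrives here with file == NULL and len == 8192; the anonymous
 * MAP_PRIVATE branch then sets pgoff from addr and the mapping is
 * actually created by mmap_region().
 */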
/*
 * The caller must hold down_write(current->mm->mmap_sem).
 */

unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flags, unsigned long pgoff)
{
	struct mm_struct *mm = current->mm;
	struct inode *inode;
	unsigned int vm_flags;
	int error;
	int accountable = 1;
	unsigned long reqprot = prot;

	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC?
	 *
	 * (the exception is when the underlying filesystem is noexec
	 *  mounted, in which case we don't add PROT_EXEC.)
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
			prot |= PROT_EXEC;

	if (!len)
		return -EINVAL;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len || len > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	/* Obtain the address to map to. we verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(file, addr, len, pgoff, flags);
	if (addr & ~PAGE_MASK)
		return addr;

	/* Do simple checking here so the lower-level routines won't have
	 * to. we assume access permissions have been handled by the open
	 * of the memory object, so we don't do any here.
	 */
	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	if (flags & MAP_LOCKED) {
		if (!can_do_mlock())
			return -EPERM;
		vm_flags |= VM_LOCKED;
	}
	/* mlock MCL_FUTURE? */
	if (vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
		lock_limit >>= PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}

	inode = file ? file->f_path.dentry->d_inode : NULL;

	if (file) {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure we don't allow writing to an append-only
			 * file..
			 */
			if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
				return -EACCES;

			/*
			 * Make sure there are no mandatory locks on the file.
			 */
			if (locks_verify_locked(inode))
				return -EAGAIN;

			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);

			/* fall through */
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
				return -EACCES;
			if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
				if (vm_flags & VM_EXEC)
					return -EPERM;
				vm_flags &= ~VM_MAYEXEC;
			}
			if (is_file_hugepages(file))
				accountable = 0;

			if (!file->f_op || !file->f_op->mmap)
				return -ENODEV;
			break;

		default:
			return -EINVAL;
		}
	} else {
		switch (flags & MAP_TYPE) {
		case MAP_SHARED:
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			break;
		case MAP_PRIVATE:
			/*
			 * Set pgoff according to addr for anon_vma.
			 */
			pgoff = addr >> PAGE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}

	error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (error)
		return error;

	return mmap_region(file, addr, len, flags, vm_flags, pgoff,
			   accountable);
}
EXPORT_SYMBOL(do_mmap_pgoff);

/*
 * Some shared mappings will want the pages marked read-only
 * to track write events. If so, we'll downgrade vm_page_prot
 * to the private version (using protection_map[] without the
 * VM_SHARED bit).
 */
int vma_wants_writenotify(struct vm_area_struct *vma)
{
	unsigned int vm_flags = vma->vm_flags;

	/* If it was private or non-writable, the write bit is already clear */
	if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
		return 0;

	/* The backer wishes to know when pages are first written to? */
	if (vma->vm_ops && vma->vm_ops->page_mkwrite)
		return 1;

	/* The open routine did something to the protections already? */
	if (pgprot_val(vma->vm_page_prot) !=
	    pgprot_val(vm_get_page_prot(vm_flags)))
		return 0;

	/* Specialty mapping? */
	if (vm_flags & (VM_PFNMAP|VM_INSERTPAGE))
		return 0;

	/* Can the mapping track the dirty pages? */
	return vma->vm_file && vma->vm_file->f_mapping &&
		mapping_cap_account_dirty(vma->vm_file->f_mapping);
}

unsigned long mmap_region(struct file *file, unsigned long addr,
			  unsigned long len, unsigned long flags,
			  unsigned int vm_flags, unsigned long pgoff,
			  int accountable)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	int correct_wcount = 0;
	int error;
	struct rb_node **rb_link, *rb_parent;
	unsigned long charged = 0;
	struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;

	/* Clear old maps */
	error = -ENOMEM;
munmap_back:
	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
	if (vma && vma->vm_start < addr + len) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/* Check against address space limit. */
	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (accountable && (!(flags & MAP_NORESERVE) ||
			    sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
		if (vm_flags & VM_SHARED) {
			/* Check memory availability in shmem_file_setup? */
			vm_flags |= VM_ACCOUNT;
		} else if (vm_flags & VM_WRITE) {
			/*
			 * Private writable mapping: check memory availability
			 */
			charged = len >> PAGE_SHIFT;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			vm_flags |= VM_ACCOUNT;
		}
	}

	/*
	 * Can we just expand an old private anonymous mapping?
	 * The VM_SHARED test is necessary because shmem_zero_setup
	 * will create the file object for a shared anonymous map below.
	 */
	if (!file && !(vm_flags & VM_SHARED) &&
	    vma_merge(mm, prev, addr, addr + len, vm_flags,
					NULL, NULL, pgoff, NULL))
		goto out;

	/*
	 * Determine the object being mapped and call the appropriate
	 * specific mapper.  The address has already been validated, but
	 * not unmapped: the old overlapping maps have been removed from
	 * the list above.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		error = -ENOMEM;
		goto unacct_error;
	}

	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = vm_flags;
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
	vma->vm_pgoff = pgoff;

	if (file) {
		error = -EINVAL;
		if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
			goto free_vma;
		if (vm_flags & VM_DENYWRITE) {
			error = deny_write_access(file);
			if (error)
				goto free_vma;
			correct_wcount = 1;
		}
		vma->vm_file = file;
		get_file(file);
		error = file->f_op->mmap(file, vma);
		if (error)
			goto unmap_and_free_vma;
		if (vm_flags & VM_EXECUTABLE)
			added_exe_file_vma(mm);
	} else if (vm_flags & VM_SHARED) {
		error = shmem_zero_setup(vma);
		if (error)
			goto free_vma;
	}

	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
	 * that memory reservation must be checked; but that reservation
	 * belongs to shared memory object, not to vma: so now clear it.
	 */
	if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
		vma->vm_flags &= ~VM_ACCOUNT;

	/* Can addr have changed??
	 *
	 * Answer: Yes, several device drivers can do it in their
	 *         f_op->mmap method. -DaveM
	 */
	addr = vma->vm_start;
	pgoff = vma->vm_pgoff;
	vm_flags = vma->vm_flags;

	if (vma_wants_writenotify(vma))
		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);

	if (file && vma_merge(mm, prev, addr, vma->vm_end,
			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
		mpol_put(vma_policy(vma));
		kmem_cache_free(vm_area_cachep, vma);
		fput(file);
		if (vm_flags & VM_EXECUTABLE)
			removed_exe_file_vma(mm);
	} else {
		vma_link(mm, vma, prev, rb_link, rb_parent);
		file = vma->vm_file;
	}

	/* Once vma denies write, undo our temporary denial count */
	if (correct_wcount)
		atomic_inc(&inode->i_writecount);
out:
	mm->total_vm += len >> PAGE_SHIFT;
	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += len >> PAGE_SHIFT;
		make_pages_present(addr, addr + len);
	}
	if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
		make_pages_present(addr, addr + len);
	return addr;

unmap_and_free_vma:
	if (correct_wcount)
		atomic_inc(&inode->i_writecount);
	vma->vm_file = NULL;
	fput(file);

	/* Undo any partial mapping done by a device driver. */
	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
	charged = 0;
free_vma:
	kmem_cache_free(vm_area_cachep, vma);
unacct_error:
	if (charged)
		vm_unacct_memory(charged);
	return error;
}
/* Get an address range which is currently unmapped.
 * For shmat() with addr=0.
 *
 * Ugly calling convention alert:
 * Return value with the low bits set means error value,
 * ie
 *	if (ret & ~PAGE_MASK)
 *		error = ret;
 *
 * This function "knows" that -ENOMEM has the bits set.
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				addr = TASK_UNMAPPED_BASE;
				start_addr = addr;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
	}
}
#endif
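/*
 * Example (editor's illustration): the bottom-up first-fit search
 * above.  With TASK_UNMAPPED_BASE at 1GB and existing vmas covering
 * [1GB, 1GB+64k) and [1GB+128k, 1GB+192k), a 64k request proceeds:
 *
 *	addr = 1GB      -> overlaps the first vma, addr = its vm_end
 *	addr = 1GB+64k  -> addr + 64k <= 1GB+128k, the hole fits
 *
 * so 1GB+64k is returned and free_area_cache remembers 1GB+128k as
 * the point from which to resume the next search.
 */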
void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
	/*
	 * Is this a new hole at the lowest possible address?
	 */
	if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
		mm->free_area_cache = addr;
		mm->cached_hole_size = ~0UL;
	}
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 */
#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache;

	/* make sure it can fit in the remaining address space */
	if (addr > len) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
	}

	if (mm->mmap_base < len)
		goto bottomup;

	addr = mm->mmap_base-len;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (!vma || addr+len <= vma->vm_start)
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len < vma->vm_start);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
#endif
void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
{
	/*
	 * Is this a new hole at the highest possible address?
	 */
	if (addr > mm->free_area_cache)
		mm->free_area_cache = addr;

	/* dont allow allocations above current base */
	if (mm->free_area_cache > mm->mmap_base)
		mm->free_area_cache = mm->mmap_base;
}

unsigned long
get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long,
				  unsigned long, unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;
	addr = get_area(file, addr, len, pgoff, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (addr > TASK_SIZE - len)
		return -ENOMEM;
	if (addr & ~PAGE_MASK)
		return -EINVAL;

	return arch_rebalance_pgtables(addr, len);
}

EXPORT_SYMBOL(get_unmapped_area);

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = NULL;

	if (mm) {
		/* Check the cache first. */
		/* (Cache hit rate is typically around 35%.) */
		vma = mm->mmap_cache;
		if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
			struct rb_node *rb_node;

			rb_node = mm->mm_rb.rb_node;
			vma = NULL;

			while (rb_node) {
				struct vm_area_struct *vma_tmp;

				vma_tmp = rb_entry(rb_node,
						struct vm_area_struct, vm_rb);

				if (vma_tmp->vm_end > addr) {
					vma = vma_tmp;
					if (vma_tmp->vm_start <= addr)
						break;
					rb_node = rb_node->rb_left;
				} else
					rb_node = rb_node->rb_right;
			}
			if (vma)
				mm->mmap_cache = vma;
		}
	}
	return vma;
}

EXPORT_SYMBOL(find_vma);

/* Same as find_vma, but also return a pointer to the previous VMA in *pprev. */
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
			struct vm_area_struct **pprev)
{
	struct vm_area_struct *vma = NULL, *prev = NULL;
	struct rb_node *rb_node;
	if (!mm)
		goto out;

	/* Guard against addr being lower than the first VMA */
	vma = mm->mmap;

	/* Go through the RB tree quickly. */
	rb_node = mm->mm_rb.rb_node;

	while (rb_node) {
		struct vm_area_struct *vma_tmp;
		vma_tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);

		if (addr < vma_tmp->vm_end) {
			rb_node = rb_node->rb_left;
		} else {
			prev = vma_tmp;
			if (!prev->vm_next || (addr < prev->vm_next->vm_end))
				break;
			rb_node = rb_node->rb_right;
		}
	}

out:
	*pprev = prev;
	return prev ? prev->vm_next : vma;
}
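/*
 * Example (editor's illustration): find_vma() returns the first vma
 * satisfying addr < vm_end, which need not actually contain addr.
 * With a single vma [0x1000, 0x2000):
 *
 *	find_vma(mm, 0x1500) -> that vma (contains the address)
 *	find_vma(mm, 0x0500) -> the same vma (first one ending above)
 *	find_vma(mm, 0x2000) -> NULL
 *
 * Callers needing containment must also check vm_start <= addr, as
 * find_extend_vma() below does.
 */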
/*
 * Verify that the stack growth is acceptable and
 * update accounting. This is shared with both the
 * grow-up and grow-down cases.
 */
static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
{
	struct mm_struct *mm = vma->vm_mm;
	struct rlimit *rlim = current->signal->rlim;
	unsigned long new_start;

	/* address space limit tests */
	if (!may_expand_vm(mm, grow))
		return -ENOMEM;

	/* Stack limit test */
	if (size > rlim[RLIMIT_STACK].rlim_cur)
		return -ENOMEM;

	/* mlock limit tests */
	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked;
		unsigned long limit;
		locked = mm->locked_vm + grow;
		limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
		if (locked > limit && !capable(CAP_IPC_LOCK))
			return -ENOMEM;
	}

	/* Check to ensure the stack will not grow into a hugetlb-only region */
	new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
			vma->vm_end - size;
	if (is_hugepage_only_range(vma->vm_mm, new_start, size))
		return -EFAULT;

	/*
	 * Overcommit..  This must be the final test, as it will
	 * update security statistics.
	 */
	if (security_vm_enough_memory(grow))
		return -ENOMEM;

	/* Ok, everything looks good - let it rip */
	mm->total_vm += grow;
	if (vma->vm_flags & VM_LOCKED)
		mm->locked_vm += grow;
	vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
	return 0;
}

#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
/*
 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
 * vma is the last one with address > vma->vm_end.  Have to extend vma.
 */
#ifndef CONFIG_IA64
static inline
#endif
int expand_upwards(struct vm_area_struct *vma, unsigned long address)
{
	int error;

	if (!(vma->vm_flags & VM_GROWSUP))
		return -EFAULT;

	/*
	 * We must make sure the anon_vma is allocated
	 * so that the anon_vma locking is not a noop.
	 */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;
	anon_vma_lock(vma);

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 * Also guard against wrapping around to address 0.
	 */
	if (address < PAGE_ALIGN(address+4))
		address = PAGE_ALIGN(address+4);
	else {
		anon_vma_unlock(vma);
		return -ENOMEM;
	}
	error = 0;

	/* Somebody else might have raced and expanded it already */
	if (address > vma->vm_end) {
		unsigned long size, grow;

		size = address - vma->vm_start;
		grow = (address - vma->vm_end) >> PAGE_SHIFT;

		error = acct_stack_growth(vma, size, grow);
		if (!error)
			vma->vm_end = address;
	}
	anon_vma_unlock(vma);
	return error;
}
#endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */

/*
 * vma is the first one with address < vma->vm_start.  Have to extend vma.
 */
static inline int expand_downwards(struct vm_area_struct *vma,
				   unsigned long address)
{
	int error;

	/*
	 * We must make sure the anon_vma is allocated
	 * so that the anon_vma locking is not a noop.
	 */
	if (unlikely(anon_vma_prepare(vma)))
		return -ENOMEM;

	address &= PAGE_MASK;
	error = security_file_mmap(NULL, 0, 0, 0, address, 1);
	if (error)
		return error;

	anon_vma_lock(vma);

	/*
	 * vma->vm_start/vm_end cannot change under us because the caller
	 * is required to hold the mmap_sem in read mode.  We need the
	 * anon_vma lock to serialize against concurrent expand_stacks.
	 */

	/* Somebody else might have raced and expanded it already */
	if (address < vma->vm_start) {
		unsigned long size, grow;

		size = vma->vm_end - address;
		grow = (vma->vm_start - address) >> PAGE_SHIFT;

		error = acct_stack_growth(vma, size, grow);
		if (!error) {
			vma->vm_start = address;
			vma->vm_pgoff -= grow;
		}
	}
	anon_vma_unlock(vma);
	return error;
}

int expand_stack_downwards(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}

#ifdef CONFIG_STACK_GROWSUP
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_upwards(vma, address);
}

struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	if (!prev || expand_stack(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED)
		make_pages_present(addr, prev->vm_end);
	return prev;
}
#else
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return expand_downwards(vma, address);
}

struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED)
		make_pages_present(addr, start);
	return vma;
}
#endif

/*
 * Ok - we have the memory areas we should free on the vma list,
 * so release them, and do the vma updates.
 *
 * Called with the mm semaphore held.
 */
static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	/* Update high watermark before we lower total_vm */
	update_hiwater_vm(mm);
	do {
		long nrpages = vma_pages(vma);

		mm->total_vm -= nrpages;
		if (vma->vm_flags & VM_LOCKED)
			mm->locked_vm -= nrpages;
		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
		vma = remove_vma(vma);
	} while (vma);
	validate_mm(mm);
}

/*
 * Get rid of page table information in the indicated region.
 *
 * Called with the mm semaphore held.
 */
static void unmap_region(struct mm_struct *mm,
		struct vm_area_struct *vma, struct vm_area_struct *prev,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
	struct mmu_gather *tlb;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
	vm_unacct_memory(nr_accounted);
	free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
				 next? next->vm_start: 0);
	tlb_finish_mmu(tlb, start, end);
}

/*
 * Create a list of vma's touched by the unmap, removing them from the mm's
 * vma list as we go..
 */
static void
detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
	struct vm_area_struct *prev, unsigned long end)
{
	struct vm_area_struct **insertion_point;
	struct vm_area_struct *tail_vma = NULL;
	unsigned long addr;

	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
	do {
		rb_erase(&vma->vm_rb, &mm->mm_rb);
		mm->map_count--;
		tail_vma = vma;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
	*insertion_point = vma;
	tail_vma->vm_next = NULL;
	if (mm->unmap_area == arch_unmap_area)
		addr = prev ? prev->vm_end : mm->mmap_base;
	else
		addr = vma ? vma->vm_start : mm->mmap_base;
	mm->unmap_area(mm, addr);
	mm->mmap_cache = NULL;		/* Kill the cache. */
}

/*
 * Split a vma into two pieces at address 'addr', a new vma is allocated
 * either for the first part or the tail.
 */
int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct mempolicy *pol;
	struct vm_area_struct *new;

	if (is_vm_hugetlb_page(vma) && (addr & ~HPAGE_MASK))
		return -EINVAL;

	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	/* most fields are the same, copy all, and then fixup */
	*new = *vma;

	if (new_below)
		new->vm_end = addr;
	else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	pol = mpol_dup(vma_policy(vma));
	if (IS_ERR(pol)) {
		kmem_cache_free(vm_area_cachep, new);
		return PTR_ERR(pol);
	}
	vma_set_policy(new, pol);

	if (new->vm_file) {
		get_file(new->vm_file);
		if (vma->vm_flags & VM_EXECUTABLE)
			added_exe_file_vma(mm);
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	if (new_below)
		vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
			((addr - new->vm_start) >> PAGE_SHIFT), new);
	else
		vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);

	return 0;
}
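/*
 * Example (editor's illustration): splitting a file-backed vma that
 * covers [0x10000, 0x20000) with vm_pgoff 100 at addr 0x14000, with
 * new_below == 0.  The new (tail) vma becomes [0x14000, 0x20000) with
 *
 *	vm_pgoff = 100 + ((0x14000 - 0x10000) >> PAGE_SHIFT) = 104
 *
 * (assuming 4k pages), and the vma_adjust() call shrinks the original
 * to [0x10000, 0x14000), keeping the file offsets continuous.
 */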
/* Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <jeremy@goop.org>
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
{
	unsigned long end;
	struct vm_area_struct *vma, *prev, *last;

	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
		return -EINVAL;

	if ((len = PAGE_ALIGN(len)) == 0)
		return -EINVAL;

	/* Find the first overlapping VMA */
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		return 0;
	/* we have  start < vma->vm_end  */

	/* if it doesn't overlap, we have nothing.. */
	end = start + len;
	if (vma->vm_start >= end)
		return 0;

	/*
	 * If we need to split any vma, do it now to save pain later.
	 *
	 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
	 * unmapped vm_area_struct will remain in use: so lower split_vma
	 * places tmp vma above, and higher split_vma places tmp vma below.
	 */
	if (start > vma->vm_start) {
		int error = split_vma(mm, vma, start, 0);
		if (error)
			return error;
		prev = vma;
	}

	/* Does it split the last one? */
	last = find_vma(mm, end);
	if (last && end > last->vm_start) {
		int error = split_vma(mm, last, end, 1);
		if (error)
			return error;
	}
	vma = prev? prev->vm_next: mm->mmap;

	/*
	 * Remove the vma's, and unmap the actual pages
	 */
	detach_vmas_to_be_unmapped(mm, vma, prev, end);
	unmap_region(mm, vma, prev, start, end);

	/* Fix up all other VM information */
	remove_vma_list(mm, vma);

	return 0;
}

EXPORT_SYMBOL(do_munmap);

asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	profile_munmap(addr);

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

static inline void verify_mm_writelocked(struct mm_struct *mm)
{
#ifdef CONFIG_DEBUG_VM
	if (unlikely(down_read_trylock(&mm->mmap_sem))) {
		WARN_ON(1);
		up_read(&mm->mmap_sem);
	}
#endif
}

/*
 *  this is really a simplified "do_mmap".  it only handles
 *  anonymous maps.  eventually we may be able to do some
 *  brk-specific accounting here.
 */
unsigned long do_brk(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev;
	unsigned long flags;
	struct rb_node **rb_link, *rb_parent;
	pgoff_t pgoff = addr >> PAGE_SHIFT;
	int error;

	len = PAGE_ALIGN(len);
	if (!len)
		return addr;

	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
	if (error)
		return error;

	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;

	error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/*
	 * mlock MCL_FUTURE?
	 */
	if (mm->def_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
		lock_limit >>= PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}

	/*
	 * mm->mmap_sem is required to protect against another thread
	 * changing the mappings in case we sleep.
	 */
	verify_mm_writelocked(mm);

	/*
	 * Clear old maps.  this also does some error checking for us
	 */
 munmap_back:
	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
	if (vma && vma->vm_start < addr + len) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/* Check against address space limits *after* clearing old maps... */
/*
 * This is really a simplified "do_mmap".  It only handles
 * anonymous maps.  Eventually we may be able to do some
 * brk-specific accounting here.
 */
unsigned long do_brk(unsigned long addr, unsigned long len)
{
	struct mm_struct * mm = current->mm;
	struct vm_area_struct * vma, * prev;
	unsigned long flags;
	struct rb_node ** rb_link, * rb_parent;
	pgoff_t pgoff = addr >> PAGE_SHIFT;
	int error;

	len = PAGE_ALIGN(len);
	if (!len)
		return addr;

	if ((addr + len) > TASK_SIZE || (addr + len) < addr)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
	if (error)
		return error;

	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;

	error = arch_mmap_check(addr, len, flags);
	if (error)
		return error;

	/*
	 * mlock MCL_FUTURE?
	 */
	if (mm->def_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;
		locked = len >> PAGE_SHIFT;
		locked += mm->locked_vm;
		lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
		lock_limit >>= PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return -EAGAIN;
	}

	/*
	 * mm->mmap_sem is required to protect against another thread
	 * changing the mappings in case we sleep.
	 */
	verify_mm_writelocked(mm);

	/*
	 * Clear old maps.  This also does some error checking for us.
	 */
 munmap_back:
	vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
	if (vma && vma->vm_start < addr + len) {
		if (do_munmap(mm, addr, len))
			return -ENOMEM;
		goto munmap_back;
	}

	/* Check against address space limits *after* clearing old maps... */
	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
		return -ENOMEM;

	if (mm->map_count > sysctl_max_map_count)
		return -ENOMEM;

	if (security_vm_enough_memory(len >> PAGE_SHIFT))
		return -ENOMEM;

	/* Can we just expand an old private anonymous mapping? */
	if (vma_merge(mm, prev, addr, addr + len, flags,
					NULL, NULL, pgoff, NULL))
		goto out;

	/*
	 * create a vma struct for an anonymous mapping
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		vm_unacct_memory(len >> PAGE_SHIFT);
		return -ENOMEM;
	}

	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;
	vma->vm_flags = flags;
	vma->vm_page_prot = vm_get_page_prot(flags);
	vma_link(mm, vma, prev, rb_link, rb_parent);
out:
	mm->total_vm += len >> PAGE_SHIFT;
	if (flags & VM_LOCKED) {
		mm->locked_vm += len >> PAGE_SHIFT;
		make_pages_present(addr, addr + len);
	}
	return addr;
}

EXPORT_SYMBOL(do_brk);

/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct mmu_gather *tlb;
	struct vm_area_struct *vma = mm->mmap;
	unsigned long nr_accounted = 0;
	unsigned long end;

	/* mm's last user has gone, and it's about to be pulled down */
	arch_exit_mmap(mm);

	lru_add_drain();
	flush_cache_mm(mm);
	tlb = tlb_gather_mmu(mm, 1);
	/* Don't update_hiwater_rss(mm) here, do_exit already did */
	/* Use -1 here to ensure all VMAs in the mm are unmapped */
	end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
	vm_unacct_memory(nr_accounted);
	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
	tlb_finish_mmu(tlb, 0, end);

	/*
	 * Walk the list again, actually closing and freeing it,
	 * with preemption enabled, without holding any MM locks.
	 */
	while (vma)
		vma = remove_vma(vma);

	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
}
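/*
 * Editor's note -- illustrative userspace sketch, not part of mmap.c:
 * do_brk() above is the anonymous-mapping helper behind the brk()/sbrk()
 * heap interface; growing the program break lands here, and when the new
 * range abuts the old heap with identical flags, the vma_merge() call
 * lets the heap remain a single vma rather than accumulating one vma
 * per extension.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *old_brk = sbrk(0);		/* current program break */
	long page = sysconf(_SC_PAGESIZE);

	if (sbrk(page) == (void *)-1) {		/* grow the heap one page */
		perror("sbrk");
		return EXIT_FAILURE;
	}
	printf("break moved from %p to %p\n", old_brk, sbrk(0));
	return EXIT_SUCCESS;
}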
/* Insert vm structure into process list sorted by address
 * and into the inode's i_mmap tree.  If vm_file is non-NULL
 * then i_mmap_lock is taken here.
 */
int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
{
	struct vm_area_struct * __vma, * prev;
	struct rb_node ** rb_link, * rb_parent;

	/*
	 * The vm_pgoff of a purely anonymous vma should be irrelevant
	 * until its first write fault, when page's anon_vma and index
	 * are set.  But now set the vm_pgoff it will almost certainly
	 * end up with (unless mremap moves it elsewhere before that
	 * first write fault), so /proc/pid/maps tells a consistent story.
	 *
	 * By setting it to reflect the virtual start address of the
	 * vma, merges and splits can happen in a seamless way, just
	 * using the existing file pgoff checks and manipulations.
	 * Similarly in do_mmap_pgoff and in do_brk.
	 */
	if (!vma->vm_file) {
		BUG_ON(vma->anon_vma);
		vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
	}
	__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
	if (__vma && __vma->vm_start < vma->vm_end)
		return -ENOMEM;
	if ((vma->vm_flags & VM_ACCOUNT) &&
	    security_vm_enough_memory_mm(mm, vma_pages(vma)))
		return -ENOMEM;
	vma_link(mm, vma, prev, rb_link, rb_parent);
	return 0;
}

/*
 * Copy the vma structure to a new location in the same mm,
 * prior to moving page table entries, to effect an mremap move.
 */
struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff)
{
	struct vm_area_struct *vma = *vmap;
	unsigned long vma_start = vma->vm_start;
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma, *prev;
	struct rb_node **rb_link, *rb_parent;
	struct mempolicy *pol;

	/*
	 * If anonymous vma has not yet been faulted, update new pgoff
	 * to match new location, to increase its chance of merging.
	 */
	if (!vma->vm_file && !vma->anon_vma)
		pgoff = addr >> PAGE_SHIFT;

	find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (new_vma) {
		/*
		 * Source vma may have been merged into new_vma
		 */
		if (vma_start >= new_vma->vm_start &&
		    vma_start < new_vma->vm_end)
			*vmap = new_vma;
	} else {
		new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
		if (new_vma) {
			*new_vma = *vma;
			pol = mpol_dup(vma_policy(vma));
			if (IS_ERR(pol)) {
				kmem_cache_free(vm_area_cachep, new_vma);
				return NULL;
			}
			vma_set_policy(new_vma, pol);
			new_vma->vm_start = addr;
			new_vma->vm_end = addr + len;
			new_vma->vm_pgoff = pgoff;
			if (new_vma->vm_file) {
				get_file(new_vma->vm_file);
				if (vma->vm_flags & VM_EXECUTABLE)
					added_exe_file_vma(mm);
			}
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			vma_link(mm, new_vma, prev, rb_link, rb_parent);
		}
	}
	return new_vma;
}

/*
 * Return true if the calling process may expand its vm space by the passed
 * number of pages
 */
int may_expand_vm(struct mm_struct *mm, unsigned long npages)
{
	unsigned long cur = mm->total_vm;	/* pages */
	unsigned long lim;

	lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;

	if (cur + npages > lim)
		return 0;
	return 1;
}


static int special_mapping_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	pgoff_t pgoff;
	struct page **pages;

	/*
	 * special mappings have no vm_file, and in that case, the mm
	 * uses vm_pgoff internally. So we have to subtract it from here.
	 * We are allowed to do this because we are the mm; do not copy
	 * this code into drivers!
	 */
	pgoff = vmf->pgoff - vma->vm_pgoff;

	for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
		pgoff--;

	if (*pages) {
		struct page *page = *pages;
		get_page(page);
		vmf->page = page;
		return 0;
	}

	return VM_FAULT_SIGBUS;
}
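/*
 * Editor's note -- illustrative userspace sketch, not part of mmap.c:
 * may_expand_vm() above is the check that makes RLIMIT_AS bite.  After
 * shrinking the address-space limit, an mmap() larger than the remaining
 * headroom fails with ENOMEM.  The 64 MB / 128 MB figures are arbitrary
 * illustration values, assuming the process currently uses well under
 * 64 MB of address space.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_AS, &rl) != 0) {
		perror("getrlimit");
		return EXIT_FAILURE;
	}
	/* Cap total address space at 64 MB for this process. */
	rl.rlim_cur = 64UL << 20;
	if (setrlimit(RLIMIT_AS, &rl) != 0) {
		perror("setrlimit");
		return EXIT_FAILURE;
	}
	/* Asking for 128 MB now exceeds RLIMIT_AS: expect ENOMEM. */
	void *p = mmap(NULL, 128UL << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED && errno == ENOMEM)
		printf("mmap failed with ENOMEM, as expected\n");
	return EXIT_SUCCESS;
}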
/*
 * Having a close hook prevents vma merging regardless of flags.
 */
static void special_mapping_close(struct vm_area_struct *vma)
{
}

static struct vm_operations_struct special_mapping_vmops = {
	.close = special_mapping_close,
	.fault = special_mapping_fault,
};

/*
 * Called with mm->mmap_sem held for writing.
 * Insert a new vma covering the given region, with the given flags.
 * Its pages are supplied by the given array of struct page *.
 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
 * The region past the last page supplied will always produce SIGBUS.
 * The array pointer and the pages it points to are assumed to stay alive
 * for as long as this mapping might exist.
 */
int install_special_mapping(struct mm_struct *mm,
			    unsigned long addr, unsigned long len,
			    unsigned long vm_flags, struct page **pages)
{
	struct vm_area_struct *vma;

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (unlikely(vma == NULL))
		return -ENOMEM;

	vma->vm_mm = mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;

	vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	vma->vm_ops = &special_mapping_vmops;
	vma->vm_private_data = pages;

	if (unlikely(insert_vm_struct(mm, vma))) {
		kmem_cache_free(vm_area_cachep, vma);
		return -ENOMEM;
	}

	mm->total_vm += len >> PAGE_SHIFT;

	return 0;
}
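/*
 * Editor's note -- a hypothetical sketch, not part of mmap.c: this is
 * roughly how an architecture's vDSO-style setup code might use the
 * helper above.  The names setup_my_special_page and my_special_pages
 * are invented for illustration; the flags and the NULL-terminated page
 * array follow the rules documented at install_special_mapping().
 */
static struct page *my_special_pages[2];	/* one page + NULL terminator */

static int setup_my_special_page(struct mm_struct *mm, unsigned long addr)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;
	my_special_pages[0] = page;

	/* Caller must hold mm->mmap_sem for writing, per the comment
	 * above install_special_mapping(). */
	return install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       my_special_pages);
}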