mlock.c revision 3480b25743cb7404928d57efeaa3d085708b04c2
/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race.  The page has already moved to the
		 * evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

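/*
 * Editor's illustration (not part of the original file): because
 * mlock_vma_page() uses TestSetPageMlocked() and munlock_vma_page() below
 * uses TestClearPageMlocked(), a page mapped into several VM_LOCKED vmas
 * is counted in NR_MLOCK only once.  For example, if tasks A and B both
 * mlock() mappings of the same shared page:
 *
 *	A: TestSetPageMlocked() returns 0 -> NR_MLOCK++, UNEVICTABLE_PGMLOCKED++
 *	B: TestSetPageMlocked() returns 1 -> no accounting change
 *
 * When A later munlocks, munlock_vma_page() clears the flag and decrements
 * NR_MLOCK; if try_to_munlock() then finds B's still-locked vma, it re-marks
 * the page via mlock_vma_page() and restores the count.  This is the
 * "semi-accurate statistics" the comment above refers to.
 */
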
/*
 * called from munlock()/munmap() path with page supposedly on the LRU.
 *
 * Note:  unlike mlock_vma_page(), we can't just clear the PageMlocked
 * [in try_to_munlock()] and then attempt to isolate the page.  We must
 * isolate the page to keep others from messing with its unevictable
 * and mlocked state while trying to munlock.  However, we pre-clear the
 * mlocked state anyway as we might lose the isolation race and we might
 * not get another chance to clear PageMlocked.  If we successfully
 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
 * mapping the page, it will restore the PageMlocked state, unless the page
 * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
 * perhaps redundantly.
 * If we lose the isolation race, and the page is mapped by other VM_LOCKED
 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap(),
 * either of which will restore the PageMlocked state by calling
 * mlock_vma_page() above, if it can grab the vma's mmap sem.
 */
static void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * We lost the race.  Let try_to_unmap() deal
			 * with it.  At least we get the page state and
			 * mlock stats right.  However, the page is still on
			 * the unevictable list.  We'll fix that up when
			 * the page is eventually freed or we scan the
			 * unevictable list.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

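/*
 * Editor's illustration (not part of the original file): one possible
 * interleaving of the isolation race described above.
 *
 *	munlock() path				vmscan / reclaim
 *	--------------				----------------
 *						page already isolated from
 *						the LRU for a reclaim pass
 *	TestClearPageMlocked():
 *	  clears the flag, NR_MLOCK--
 *	isolate_lru_page() fails
 *	(page is not on the LRU)		try_to_unmap()/try_to_munlock()
 *						finds another VM_LOCKED vma and
 *						calls mlock_vma_page(): the flag
 *						and NR_MLOCK are restored and the
 *						page returns to the unevictable
 *						list
 *
 * If no other VM_LOCKED vma maps the page, it simply stays munlocked.  The
 * PGSTRANDED vs. PGMUNLOCKED accounting above records whether the loser of
 * the race found the page still flagged PageUnevictable (stranded until it
 * is freed or the unevictable list is rescanned) or already effectively
 * munlocked.
 */
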
/**
 * __mlock_vma_pages_range() -  mlock/munlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @mlock: 0 indicates munlock, otherwise mlock.
 *
 * If @mlock == 0, unlock an mlocked range;
 * else mlock the range of pages.  This takes care of making the pages
 * present, too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   int mlock)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	struct page *pages[16]; /* 16 gives a reasonable batch */
	int nr_pages = (end - start) / PAGE_SIZE;
	int ret = 0;
	int gup_flags = 0;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end > vma->vm_end);
	VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
		  (atomic_read(&mm->mm_users) != 0));

	/*
	 * mlock:   don't page populate if vma has PROT_NONE permission.
	 * munlock: always do munlock although the vma has PROT_NONE
	 *	    permission, or SIGKILL is pending.
	 */
	if (!mlock)
		gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
			     GUP_FLAGS_IGNORE_SIGKILL;

	if (vma->vm_flags & VM_WRITE)
		gup_flags |= GUP_FLAGS_WRITE;

	while (nr_pages > 0) {
		int i;

		cond_resched();

		/*
		 * get_user_pages makes pages present if we are
		 * setting mlock, and this extra reference count will
		 * disable migration of this page.  However, the page may
		 * still be truncated out from under us.
		 */
		ret = __get_user_pages(current, mm, addr,
				min_t(int, nr_pages, ARRAY_SIZE(pages)),
				gup_flags, pages, NULL);
		/*
		 * This can happen for, e.g., VM_NONLINEAR regions before
		 * a page has been allocated and mapped at a given offset,
		 * or for addresses that map beyond end of a file.
		 * We'll mlock the pages if/when they get faulted in.
		 */
		if (ret < 0)
			break;
		if (ret == 0) {
			/*
			 * We know the vma is there, so the only time
			 * we cannot get a single page should be an
			 * error (ret < 0) case.
			 */
			WARN_ON(1);
			break;
		}

		lru_add_drain();	/* push cached pages to LRU */

		for (i = 0; i < ret; i++) {
			struct page *page = pages[i];

			lock_page(page);
			/*
			 * Because we lock page here and migration is blocked
			 * by the elevated reference, we need only check for
			 * page truncation (file-cache only).
			 */
			if (page->mapping) {
				if (mlock)
					mlock_vma_page(page);
				else
					munlock_vma_page(page);
			}
			unlock_page(page);
			put_page(page);		/* ref from get_user_pages() */

			/*
			 * here we assume that get_user_pages() has given us
			 * a list of virtually contiguous pages.
			 */
			addr += PAGE_SIZE;	/* for next get_user_pages() */
			nr_pages--;
		}
		ret = 0;
	}

	return ret;	/* count entire vma as locked_vm */
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

#else /* CONFIG_UNEVICTABLE_LRU */

/*
 * Just make pages present if VM_LOCKED.  No-op if unlocking.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end,
				   int mlock)
{
	if (mlock && (vma->vm_flags & VM_LOCKED))
		return make_pages_present(start, end);
	return 0;
}

static inline int __mlock_posix_error_return(long retval)
{
	return 0;
}

#endif /* CONFIG_UNEVICTABLE_LRU */

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end   - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 *
 * return negative error if vma spanning @start-@end disappears while
 * mmap semaphore is dropped.  Unlikely?
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current))) {
		long error;
		downgrade_write(&mm->mmap_sem);

		error = __mlock_vma_pages_range(vma, start, end, 1);

		up_read(&mm->mmap_sem);
		/* vma can change or disappear */
		down_write(&mm->mmap_sem);
		vma = find_vma(mm, start);
		/* non-NULL vma must contain @start, but need to check @end */
		if (!vma || end > vma->vm_end)
			return -ENOMEM;

		return 0;	/* hide other errors from mmap(), et al */
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  Huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}

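/*
 * Editor's sketch of how the return convention above is meant to be
 * consumed (hypothetical caller, not the actual code; see the mmap()/mremap()
 * call sites in mm/mmap.c and mm/mremap.c for the real thing):
 *
 *	mm->locked_vm += (end - start) >> PAGE_SHIFT;	-- count optimistically
 *	ret = mlock_vma_pages_range(vma, start, end);
 *	if (ret < 0)
 *		return ret;	-- vma disappeared while mmap_sem was dropped
 *	mm->locked_vm -= ret;	-- "special" vma: un-count its pages
 */
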
/*
 * munlock_vma_pages_range() -  munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end)
{
	vma->vm_flags &= ~VM_LOCKED;
	__mlock_vma_pages_range(vma, start, end, 0);
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags ||
			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;	/* don't set VM_LOCKED, don't count */

	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)) {
		if (lock)
			make_pages_present(start, end);
		goto out;	/* don't set VM_LOCKED, don't count */
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */
	vma->vm_flags = newflags;

	if (lock) {
		/*
		 * mmap_sem is currently held for write.  Downgrade the write
		 * lock to a read lock so that other faults, mmap scans, ...
		 * can proceed while we fault in all pages.
		 */
		downgrade_write(&mm->mmap_sem);

		ret = __mlock_vma_pages_range(vma, start, end, 1);

		/*
		 * Need to reacquire mmap sem in write mode, as our callers
		 * expect this.  We have no support for atomically upgrading
		 * a sem to write, so we need to check for ranges while sem
		 * is unlocked.
		 */
		up_read(&mm->mmap_sem);
		/* vma can change or disappear */
		down_write(&mm->mmap_sem);
		*prev = find_vma(mm, start);
		/* non-NULL *prev must contain @start, but need to check @end */
		if (!(*prev) || end > (*prev)->vm_end)
			ret = -ENOMEM;
		else if (ret > 0) {
			mm->locked_vm -= ret;
			ret = 0;
		} else
			ret = __mlock_posix_error_return(ret); /* translate if needed */
	} else {
		/*
		 * TODO:  for unlocking, pages will already be resident, so
		 * we don't need to wait for allocations/reclaim/pagein, ...
		 * However, unlocking a very large region can still take a
		 * while.  Should we downgrade the semaphore for both lock
		 * AND unlock ?
		 */
		__mlock_vma_pages_range(vma, start, end, 0);
	}

out:
	*prev = vma;
	return ret;
}

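/*
 * Editor's worked example for mlock_fixup() above (not part of the original
 * file): suppose a single 10-page vma spans [A, A + 10 pages) and userspace
 * calls mlock(A + 2 pages, 5 pages).  None of the filters apply and
 * vma_merge() finds nothing suitable to merge with, so split_vma() runs
 * twice, leaving three vmas:
 *
 *	[A           , A +  2 pages)	flags unchanged
 *	[A +  2 pages, A +  7 pages)	VM_LOCKED set, 5 pages added to
 *					mm->locked_vm, pages faulted in by
 *					__mlock_vma_pages_range()
 *	[A +  7 pages, A + 10 pages)	flags unchanged
 *
 * A later munlock() of the same range subtracts those 5 pages again and
 * clears VM_LOCKED; since the middle vma's flags then match its neighbours,
 * vma_merge() can coalesce the three vmas back into one.
 */
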
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}

SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

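/*
 * Editor's worked example for the alignment and limit check in sys_mlock()
 * above (not part of the original file; assumes 4 KiB pages):
 * mlock(0x2123, 0x1800) becomes
 *
 *	len   = PAGE_ALIGN(0x1800 + (0x2123 & ~PAGE_MASK))
 *	      = PAGE_ALIGN(0x1923) = 0x2000		(2 pages)
 *	start = 0x2123 & PAGE_MASK = 0x2000
 *
 * so the range actually locked is [0x2000, 0x4000), which covers every byte
 * the caller asked for.  Those 2 pages plus the existing mm->locked_vm must
 * fit within RLIMIT_MEMLOCK >> PAGE_SHIFT unless the caller has
 * CAP_IPC_LOCK; otherwise sys_mlock() returns -ENOMEM without calling
 * do_mlock().
 */
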
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}

void *alloc_locked_buffer(size_t size)
{
	unsigned long rlim, vm, pgsz;
	void *buffer = NULL;

	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
	vm = current->mm->total_vm + pgsz;
	if (rlim < vm)
		goto out;

	rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
	vm = current->mm->locked_vm + pgsz;
	if (rlim < vm)
		goto out;

	buffer = kzalloc(size, GFP_KERNEL);
	if (!buffer)
		goto out;

	current->mm->total_vm += pgsz;
	current->mm->locked_vm += pgsz;

 out:
	up_write(&current->mm->mmap_sem);
	return buffer;
}

void free_locked_buffer(void *buffer, size_t size)
{
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	current->mm->total_vm -= pgsz;
	current->mm->locked_vm -= pgsz;

	up_write(&current->mm->mmap_sem);

	kfree(buffer);
}
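For reference, here is a minimal userspace view of the syscalls implemented above. This is an illustrative sketch, not part of mlock.c, and the file name in the build comment is hypothetical: it locks a small buffer with mlock(), so the -EPERM/-ENOMEM/-EAGAIN returns from the paths above surface as errno values, then unlocks it and finishes with mlockall()/munlockall().

/* Userspace demo (hypothetical file name): cc -o mlock_demo mlock_demo.c */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;
	size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
	void *buf;

	if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)	/* the limit sys_mlock() checks */
		printf("RLIMIT_MEMLOCK: %llu bytes\n",
		       (unsigned long long)rl.rlim_cur);

	buf = malloc(len);
	if (!buf)
		return 1;
	memset(buf, 0, len);

	/* sys_mlock() rounds the range to page boundaries and may fail with
	 * EPERM, ENOMEM or EAGAIN, matching the kernel paths above. */
	if (mlock(buf, len) != 0) {
		perror("mlock");
		free(buf);
		return 1;
	}
	printf("locked %zu bytes at %p\n", len, buf);

	if (munlock(buf, len) != 0)			/* sys_munlock() */
		perror("munlock");

	if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0)	/* sys_mlockall() */
		perror("mlockall");
	else
		munlockall();				/* sys_munlockall() */

	free(buf);
	return 0;
}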