mlock.c revision 5344b7e648980cc2ca613ec03a56a8222ff48820
1/* 2 * linux/mm/mlock.c 3 * 4 * (C) Copyright 1995 Linus Torvalds 5 * (C) Copyright 2002 Christoph Hellwig 6 */ 7 8#include <linux/capability.h> 9#include <linux/mman.h> 10#include <linux/mm.h> 11#include <linux/swap.h> 12#include <linux/swapops.h> 13#include <linux/pagemap.h> 14#include <linux/mempolicy.h> 15#include <linux/syscalls.h> 16#include <linux/sched.h> 17#include <linux/module.h> 18#include <linux/rmap.h> 19#include <linux/mmzone.h> 20#include <linux/hugetlb.h> 21 22#include "internal.h" 23 24int can_do_mlock(void) 25{ 26 if (capable(CAP_IPC_LOCK)) 27 return 1; 28 if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0) 29 return 1; 30 return 0; 31} 32EXPORT_SYMBOL(can_do_mlock); 33 34#ifdef CONFIG_UNEVICTABLE_LRU 35/* 36 * Mlocked pages are marked with PageMlocked() flag for efficient testing 37 * in vmscan and, possibly, the fault path; and to support semi-accurate 38 * statistics. 39 * 40 * An mlocked page [PageMlocked(page)] is unevictable. As such, it will 41 * be placed on the LRU "unevictable" list, rather than the [in]active lists. 42 * The unevictable list is an LRU sibling list to the [in]active lists. 43 * PageUnevictable is set to indicate the unevictable state. 44 * 45 * When lazy mlocking via vmscan, it is important to ensure that the 46 * vma's VM_LOCKED status is not concurrently being modified, otherwise we 47 * may have mlocked a page that is being munlocked. So lazy mlock must take 48 * the mmap_sem for read, and verify that the vma really is locked 49 * (see mm/rmap.c). 50 */ 51 52/* 53 * LRU accounting for clear_page_mlock() 54 */ 55void __clear_page_mlock(struct page *page) 56{ 57 VM_BUG_ON(!PageLocked(page)); 58 59 if (!page->mapping) { /* truncated ? */ 60 return; 61 } 62 63 dec_zone_page_state(page, NR_MLOCK); 64 count_vm_event(UNEVICTABLE_PGCLEARED); 65 if (!isolate_lru_page(page)) { 66 putback_lru_page(page); 67 } else { 68 /* 69 * Page not on the LRU yet. Flush all pagevecs and retry. 70 */ 71 lru_add_drain_all(); 72 if (!isolate_lru_page(page)) 73 putback_lru_page(page); 74 else if (PageUnevictable(page)) 75 count_vm_event(UNEVICTABLE_PGSTRANDED); 76 77 } 78} 79 80/* 81 * Mark page as mlocked if not already. 82 * If page on LRU, isolate and putback to move to unevictable list. 83 */ 84void mlock_vma_page(struct page *page) 85{ 86 BUG_ON(!PageLocked(page)); 87 88 if (!TestSetPageMlocked(page)) { 89 inc_zone_page_state(page, NR_MLOCK); 90 count_vm_event(UNEVICTABLE_PGMLOCKED); 91 if (!isolate_lru_page(page)) 92 putback_lru_page(page); 93 } 94} 95 96/* 97 * called from munlock()/munmap() path with page supposedly on the LRU. 98 * 99 * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked 100 * [in try_to_munlock()] and then attempt to isolate the page. We must 101 * isolate the page to keep others from messing with its unevictable 102 * and mlocked state while trying to munlock. However, we pre-clear the 103 * mlocked state anyway as we might lose the isolation race and we might 104 * not get another chance to clear PageMlocked. If we successfully 105 * isolate the page and try_to_munlock() detects other VM_LOCKED vmas 106 * mapping the page, it will restore the PageMlocked state, unless the page 107 * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(), 108 * perhaps redundantly. 109 * If we lose the isolation race, and the page is mapped by other VM_LOCKED 110 * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap() 111 * either of which will restore the PageMlocked state by calling 112 * mlock_vma_page() above, if it can grab the vma's mmap sem. 113 */ 114static void munlock_vma_page(struct page *page) 115{ 116 BUG_ON(!PageLocked(page)); 117 118 if (TestClearPageMlocked(page)) { 119 dec_zone_page_state(page, NR_MLOCK); 120 if (!isolate_lru_page(page)) { 121 int ret = try_to_munlock(page); 122 /* 123 * did try_to_unlock() succeed or punt? 124 */ 125 if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN) 126 count_vm_event(UNEVICTABLE_PGMUNLOCKED); 127 128 putback_lru_page(page); 129 } else { 130 /* 131 * We lost the race. let try_to_unmap() deal 132 * with it. At least we get the page state and 133 * mlock stats right. However, page is still on 134 * the noreclaim list. We'll fix that up when 135 * the page is eventually freed or we scan the 136 * noreclaim list. 137 */ 138 if (PageUnevictable(page)) 139 count_vm_event(UNEVICTABLE_PGSTRANDED); 140 else 141 count_vm_event(UNEVICTABLE_PGMUNLOCKED); 142 } 143 } 144} 145 146/** 147 * __mlock_vma_pages_range() - mlock/munlock a range of pages in the vma. 148 * @vma: target vma 149 * @start: start address 150 * @end: end address 151 * @mlock: 0 indicate munlock, otherwise mlock. 152 * 153 * If @mlock == 0, unlock an mlocked range; 154 * else mlock the range of pages. This takes care of making the pages present , 155 * too. 156 * 157 * return 0 on success, negative error code on error. 158 * 159 * vma->vm_mm->mmap_sem must be held for at least read. 160 */ 161static long __mlock_vma_pages_range(struct vm_area_struct *vma, 162 unsigned long start, unsigned long end, 163 int mlock) 164{ 165 struct mm_struct *mm = vma->vm_mm; 166 unsigned long addr = start; 167 struct page *pages[16]; /* 16 gives a reasonable batch */ 168 int nr_pages = (end - start) / PAGE_SIZE; 169 int ret; 170 int gup_flags = 0; 171 172 VM_BUG_ON(start & ~PAGE_MASK); 173 VM_BUG_ON(end & ~PAGE_MASK); 174 VM_BUG_ON(start < vma->vm_start); 175 VM_BUG_ON(end > vma->vm_end); 176 VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) && 177 (atomic_read(&mm->mm_users) != 0)); 178 179 /* 180 * mlock: don't page populate if page has PROT_NONE permission. 181 * munlock: the pages always do munlock althrough 182 * its has PROT_NONE permission. 183 */ 184 if (!mlock) 185 gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS; 186 187 if (vma->vm_flags & VM_WRITE) 188 gup_flags |= GUP_FLAGS_WRITE; 189 190 lru_add_drain_all(); /* push cached pages to LRU */ 191 192 while (nr_pages > 0) { 193 int i; 194 195 cond_resched(); 196 197 /* 198 * get_user_pages makes pages present if we are 199 * setting mlock. and this extra reference count will 200 * disable migration of this page. However, page may 201 * still be truncated out from under us. 202 */ 203 ret = __get_user_pages(current, mm, addr, 204 min_t(int, nr_pages, ARRAY_SIZE(pages)), 205 gup_flags, pages, NULL); 206 /* 207 * This can happen for, e.g., VM_NONLINEAR regions before 208 * a page has been allocated and mapped at a given offset, 209 * or for addresses that map beyond end of a file. 210 * We'll mlock the the pages if/when they get faulted in. 211 */ 212 if (ret < 0) 213 break; 214 if (ret == 0) { 215 /* 216 * We know the vma is there, so the only time 217 * we cannot get a single page should be an 218 * error (ret < 0) case. 219 */ 220 WARN_ON(1); 221 break; 222 } 223 224 lru_add_drain(); /* push cached pages to LRU */ 225 226 for (i = 0; i < ret; i++) { 227 struct page *page = pages[i]; 228 229 lock_page(page); 230 /* 231 * Because we lock page here and migration is blocked 232 * by the elevated reference, we need only check for 233 * page truncation (file-cache only). 234 */ 235 if (page->mapping) { 236 if (mlock) 237 mlock_vma_page(page); 238 else 239 munlock_vma_page(page); 240 } 241 unlock_page(page); 242 put_page(page); /* ref from get_user_pages() */ 243 244 /* 245 * here we assume that get_user_pages() has given us 246 * a list of virtually contiguous pages. 247 */ 248 addr += PAGE_SIZE; /* for next get_user_pages() */ 249 nr_pages--; 250 } 251 } 252 253 lru_add_drain_all(); /* to update stats */ 254 255 return 0; /* count entire vma as locked_vm */ 256} 257 258#else /* CONFIG_UNEVICTABLE_LRU */ 259 260/* 261 * Just make pages present if VM_LOCKED. No-op if unlocking. 262 */ 263static long __mlock_vma_pages_range(struct vm_area_struct *vma, 264 unsigned long start, unsigned long end, 265 int mlock) 266{ 267 if (mlock && (vma->vm_flags & VM_LOCKED)) 268 make_pages_present(start, end); 269 return 0; 270} 271#endif /* CONFIG_UNEVICTABLE_LRU */ 272 273/** 274 * mlock_vma_pages_range() - mlock pages in specified vma range. 275 * @vma - the vma containing the specfied address range 276 * @start - starting address in @vma to mlock 277 * @end - end address [+1] in @vma to mlock 278 * 279 * For mmap()/mremap()/expansion of mlocked vma. 280 * 281 * return 0 on success for "normal" vmas. 282 * 283 * return number of pages [> 0] to be removed from locked_vm on success 284 * of "special" vmas. 285 * 286 * return negative error if vma spanning @start-@range disappears while 287 * mmap semaphore is dropped. Unlikely? 288 */ 289long mlock_vma_pages_range(struct vm_area_struct *vma, 290 unsigned long start, unsigned long end) 291{ 292 struct mm_struct *mm = vma->vm_mm; 293 int nr_pages = (end - start) / PAGE_SIZE; 294 BUG_ON(!(vma->vm_flags & VM_LOCKED)); 295 296 /* 297 * filter unlockable vmas 298 */ 299 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) 300 goto no_mlock; 301 302 if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) || 303 is_vm_hugetlb_page(vma) || 304 vma == get_gate_vma(current))) { 305 long error; 306 downgrade_write(&mm->mmap_sem); 307 308 error = __mlock_vma_pages_range(vma, start, end, 1); 309 310 up_read(&mm->mmap_sem); 311 /* vma can change or disappear */ 312 down_write(&mm->mmap_sem); 313 vma = find_vma(mm, start); 314 /* non-NULL vma must contain @start, but need to check @end */ 315 if (!vma || end > vma->vm_end) 316 return -ENOMEM; 317 318 return 0; /* hide other errors from mmap(), et al */ 319 } 320 321 /* 322 * User mapped kernel pages or huge pages: 323 * make these pages present to populate the ptes, but 324 * fall thru' to reset VM_LOCKED--no need to unlock, and 325 * return nr_pages so these don't get counted against task's 326 * locked limit. huge pages are already counted against 327 * locked vm limit. 328 */ 329 make_pages_present(start, end); 330 331no_mlock: 332 vma->vm_flags &= ~VM_LOCKED; /* and don't come back! */ 333 return nr_pages; /* error or pages NOT mlocked */ 334} 335 336 337/* 338 * munlock_vma_pages_range() - munlock all pages in the vma range.' 339 * @vma - vma containing range to be munlock()ed. 340 * @start - start address in @vma of the range 341 * @end - end of range in @vma. 342 * 343 * For mremap(), munmap() and exit(). 344 * 345 * Called with @vma VM_LOCKED. 346 * 347 * Returns with VM_LOCKED cleared. Callers must be prepared to 348 * deal with this. 349 * 350 * We don't save and restore VM_LOCKED here because pages are 351 * still on lru. In unmap path, pages might be scanned by reclaim 352 * and re-mlocked by try_to_{munlock|unmap} before we unmap and 353 * free them. This will result in freeing mlocked pages. 354 */ 355void munlock_vma_pages_range(struct vm_area_struct *vma, 356 unsigned long start, unsigned long end) 357{ 358 vma->vm_flags &= ~VM_LOCKED; 359 __mlock_vma_pages_range(vma, start, end, 0); 360} 361 362/* 363 * mlock_fixup - handle mlock[all]/munlock[all] requests. 364 * 365 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and 366 * munlock is a no-op. However, for some special vmas, we go ahead and 367 * populate the ptes via make_pages_present(). 368 * 369 * For vmas that pass the filters, merge/split as appropriate. 370 */ 371static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, 372 unsigned long start, unsigned long end, unsigned int newflags) 373{ 374 struct mm_struct *mm = vma->vm_mm; 375 pgoff_t pgoff; 376 int nr_pages; 377 int ret = 0; 378 int lock = newflags & VM_LOCKED; 379 380 if (newflags == vma->vm_flags || 381 (vma->vm_flags & (VM_IO | VM_PFNMAP))) 382 goto out; /* don't set VM_LOCKED, don't count */ 383 384 if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) || 385 is_vm_hugetlb_page(vma) || 386 vma == get_gate_vma(current)) { 387 if (lock) 388 make_pages_present(start, end); 389 goto out; /* don't set VM_LOCKED, don't count */ 390 } 391 392 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); 393 *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma, 394 vma->vm_file, pgoff, vma_policy(vma)); 395 if (*prev) { 396 vma = *prev; 397 goto success; 398 } 399 400 if (start != vma->vm_start) { 401 ret = split_vma(mm, vma, start, 1); 402 if (ret) 403 goto out; 404 } 405 406 if (end != vma->vm_end) { 407 ret = split_vma(mm, vma, end, 0); 408 if (ret) 409 goto out; 410 } 411 412success: 413 /* 414 * Keep track of amount of locked VM. 415 */ 416 nr_pages = (end - start) >> PAGE_SHIFT; 417 if (!lock) 418 nr_pages = -nr_pages; 419 mm->locked_vm += nr_pages; 420 421 /* 422 * vm_flags is protected by the mmap_sem held in write mode. 423 * It's okay if try_to_unmap_one unmaps a page just after we 424 * set VM_LOCKED, __mlock_vma_pages_range will bring it back. 425 */ 426 vma->vm_flags = newflags; 427 428 if (lock) { 429 /* 430 * mmap_sem is currently held for write. Downgrade the write 431 * lock to a read lock so that other faults, mmap scans, ... 432 * while we fault in all pages. 433 */ 434 downgrade_write(&mm->mmap_sem); 435 436 ret = __mlock_vma_pages_range(vma, start, end, 1); 437 if (ret > 0) { 438 mm->locked_vm -= ret; 439 ret = 0; 440 } 441 /* 442 * Need to reacquire mmap sem in write mode, as our callers 443 * expect this. We have no support for atomically upgrading 444 * a sem to write, so we need to check for ranges while sem 445 * is unlocked. 446 */ 447 up_read(&mm->mmap_sem); 448 /* vma can change or disappear */ 449 down_write(&mm->mmap_sem); 450 *prev = find_vma(mm, start); 451 /* non-NULL *prev must contain @start, but need to check @end */ 452 if (!(*prev) || end > (*prev)->vm_end) 453 ret = -ENOMEM; 454 } else { 455 /* 456 * TODO: for unlocking, pages will already be resident, so 457 * we don't need to wait for allocations/reclaim/pagein, ... 458 * However, unlocking a very large region can still take a 459 * while. Should we downgrade the semaphore for both lock 460 * AND unlock ? 461 */ 462 __mlock_vma_pages_range(vma, start, end, 0); 463 } 464 465out: 466 *prev = vma; 467 return ret; 468} 469 470static int do_mlock(unsigned long start, size_t len, int on) 471{ 472 unsigned long nstart, end, tmp; 473 struct vm_area_struct * vma, * prev; 474 int error; 475 476 len = PAGE_ALIGN(len); 477 end = start + len; 478 if (end < start) 479 return -EINVAL; 480 if (end == start) 481 return 0; 482 vma = find_vma_prev(current->mm, start, &prev); 483 if (!vma || vma->vm_start > start) 484 return -ENOMEM; 485 486 if (start > vma->vm_start) 487 prev = vma; 488 489 for (nstart = start ; ; ) { 490 unsigned int newflags; 491 492 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ 493 494 newflags = vma->vm_flags | VM_LOCKED; 495 if (!on) 496 newflags &= ~VM_LOCKED; 497 498 tmp = vma->vm_end; 499 if (tmp > end) 500 tmp = end; 501 error = mlock_fixup(vma, &prev, nstart, tmp, newflags); 502 if (error) 503 break; 504 nstart = tmp; 505 if (nstart < prev->vm_end) 506 nstart = prev->vm_end; 507 if (nstart >= end) 508 break; 509 510 vma = prev->vm_next; 511 if (!vma || vma->vm_start != nstart) { 512 error = -ENOMEM; 513 break; 514 } 515 } 516 return error; 517} 518 519asmlinkage long sys_mlock(unsigned long start, size_t len) 520{ 521 unsigned long locked; 522 unsigned long lock_limit; 523 int error = -ENOMEM; 524 525 if (!can_do_mlock()) 526 return -EPERM; 527 528 down_write(¤t->mm->mmap_sem); 529 len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); 530 start &= PAGE_MASK; 531 532 locked = len >> PAGE_SHIFT; 533 locked += current->mm->locked_vm; 534 535 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; 536 lock_limit >>= PAGE_SHIFT; 537 538 /* check against resource limits */ 539 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) 540 error = do_mlock(start, len, 1); 541 up_write(¤t->mm->mmap_sem); 542 return error; 543} 544 545asmlinkage long sys_munlock(unsigned long start, size_t len) 546{ 547 int ret; 548 549 down_write(¤t->mm->mmap_sem); 550 len = PAGE_ALIGN(len + (start & ~PAGE_MASK)); 551 start &= PAGE_MASK; 552 ret = do_mlock(start, len, 0); 553 up_write(¤t->mm->mmap_sem); 554 return ret; 555} 556 557static int do_mlockall(int flags) 558{ 559 struct vm_area_struct * vma, * prev = NULL; 560 unsigned int def_flags = 0; 561 562 if (flags & MCL_FUTURE) 563 def_flags = VM_LOCKED; 564 current->mm->def_flags = def_flags; 565 if (flags == MCL_FUTURE) 566 goto out; 567 568 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { 569 unsigned int newflags; 570 571 newflags = vma->vm_flags | VM_LOCKED; 572 if (!(flags & MCL_CURRENT)) 573 newflags &= ~VM_LOCKED; 574 575 /* Ignore errors */ 576 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags); 577 } 578out: 579 return 0; 580} 581 582asmlinkage long sys_mlockall(int flags) 583{ 584 unsigned long lock_limit; 585 int ret = -EINVAL; 586 587 if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE))) 588 goto out; 589 590 ret = -EPERM; 591 if (!can_do_mlock()) 592 goto out; 593 594 down_write(¤t->mm->mmap_sem); 595 596 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; 597 lock_limit >>= PAGE_SHIFT; 598 599 ret = -ENOMEM; 600 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || 601 capable(CAP_IPC_LOCK)) 602 ret = do_mlockall(flags); 603 up_write(¤t->mm->mmap_sem); 604out: 605 return ret; 606} 607 608asmlinkage long sys_munlockall(void) 609{ 610 int ret; 611 612 down_write(¤t->mm->mmap_sem); 613 ret = do_mlockall(0); 614 up_write(¤t->mm->mmap_sem); 615 return ret; 616} 617 618/* 619 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB 620 * shm segments) get accounted against the user_struct instead. 621 */ 622static DEFINE_SPINLOCK(shmlock_user_lock); 623 624int user_shm_lock(size_t size, struct user_struct *user) 625{ 626 unsigned long lock_limit, locked; 627 int allowed = 0; 628 629 locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 630 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; 631 if (lock_limit == RLIM_INFINITY) 632 allowed = 1; 633 lock_limit >>= PAGE_SHIFT; 634 spin_lock(&shmlock_user_lock); 635 if (!allowed && 636 locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK)) 637 goto out; 638 get_uid(user); 639 user->locked_shm += locked; 640 allowed = 1; 641out: 642 spin_unlock(&shmlock_user_lock); 643 return allowed; 644} 645 646void user_shm_unlock(size_t size, struct user_struct *user) 647{ 648 spin_lock(&shmlock_user_lock); 649 user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 650 spin_unlock(&shmlock_user_lock); 651 free_uid(user); 652} 653