memory.c revision a477097d9c37c1cf289c7f0257dffcfa42d50197
/*
 *  linux/mm/memory.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * demand-loading started 01.12.91 - seems it is high on the list of
 * things wanted, and it should be easy to implement. - Linus
 */

/*
 * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
 * pages started 02.12.91, seems to work. - Linus.
 *
 * Tested sharing by executing about 30 /bin/sh: under the old kernel it
 * would have taken more than the 6M I have free, but it worked well as
 * far as I could see.
 *
 * Also corrected some "invalidate()"s - I wasn't doing enough of them.
 */

/*
 * Real VM (paging to/from disk) started 18.12.91. Much more work and
 * thought has to go into this. Oh, well..
 * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
 *		Found it. Everything seems to work now.
 * 20.12.91  -  Ok, making the swap-device changeable like the root.
 */

/*
 * 05.04.94  -  Multi-page memory management added for v1.1.
 *		Idea by Alex Bligh (alex@cconcepts.co.uk)
 *
 * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
 *		(Gerhard.Wichert@pdb.siemens.de)
 *
 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
 */

#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/init.h>
#include <linux/writeback.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#include <linux/swapops.h>
#include <linux/elf.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
/* use the per-pgdat data instead for discontigmem - mbligh */
unsigned long max_mapnr;
struct page *mem_map;

EXPORT_SYMBOL(max_mapnr);
EXPORT_SYMBOL(mem_map);
#endif

unsigned long num_physpages;
/*
 * A number of key systems in x86 including ioremap() rely on the assumption
 * that high_memory defines the upper bound on direct map memory, the end
 * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
 * and ZONE_HIGHMEM.
 */
void * high_memory;

EXPORT_SYMBOL(num_physpages);
EXPORT_SYMBOL(high_memory);

/*
 * Randomize the address space (stacks, mmaps, brk, etc.).
 *
 * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
 *   as ancient (libc5 based) binaries can segfault. )
 */
int randomize_va_space __read_mostly =
#ifdef CONFIG_COMPAT_BRK
					1;
#else
					2;
#endif

static int __init disable_randmaps(char *s)
{
	randomize_va_space = 0;
	return 1;
}
__setup("norandmaps", disable_randmaps);
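/*
 * Usage note (illustrative, not part of the original file): besides the
 * "norandmaps" boot parameter handled above, randomize_va_space is also
 * exposed at runtime through the sysctl kernel.randomize_va_space
 * (/proc/sys/kernel/randomize_va_space); writing 0 there has the same
 * effect as booting with "norandmaps":
 *
 *	echo 0 > /proc/sys/kernel/randomize_va_space
 */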
/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error, before resetting entry to p?d_none.  Usually (but
 * very seldom) called out from the p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
	pgd_ERROR(*pgd);
	pgd_clear(pgd);
}

void pud_clear_bad(pud_t *pud)
{
	pud_ERROR(*pud);
	pud_clear(pud);
}

void pmd_clear_bad(pmd_t *pmd)
{
	pmd_ERROR(*pmd);
	pmd_clear(pmd);
}

/*
 * Note: this doesn't free the actual pages themselves. That
 * has been handled earlier when unmapping all the memory regions.
 */
static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
{
	pgtable_t token = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free_tlb(tlb, token);
	tlb->mm->nr_ptes--;
}

static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		free_pte_range(tlb, pmd);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd);
}

static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud);
}

/*
 * This function frees user-level page tables of a process.
 *
 * Must be called with pagetable lock held.
 */
void free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start;

	/*
	 * The next few lines have given us lots of grief...
	 *
	 * Why are we testing PMD* at this top level?  Because often
	 * there will be no work to do at all, and we'd prefer not to
	 * go all the way down to the bottom just to discover that.
	 *
	 * Why all these "- 1"s?  Because 0 represents both the bottom
	 * of the address space and the top of it (using -1 for the
	 * top wouldn't help much: the masks would do the wrong thing).
	 * The rule is that addr 0 and floor 0 refer to the bottom of
	 * the address space, but end 0 and ceiling 0 refer to the top.
	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
	 * that end 0 case should be mythical).
	 *
	 * Wherever addr is brought up or ceiling brought down, we must
	 * be careful to reject "the opposite 0" before it confuses the
	 * subsequent tests.  But what about where end is brought down
	 * by PMD_SIZE below? no, end can't go down to 0 there.
	 *
	 * Whereas we round start (addr) and ceiling down, by different
	 * masks at different levels, in order to test whether a table
	 * now has no other vmas using it, so can be freed, we don't
	 * bother to round floor or end up - the tests don't need that.
	 */

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	start = addr;
	pgd = pgd_offset(tlb->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		free_pud_range(tlb, pgd, addr, next, floor, ceiling);
	} while (pgd++, addr = next, addr != end);
}

void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long floor, unsigned long ceiling)
{
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long addr = vma->vm_start;

		/*
		 * Hide vma from rmap and vmtruncate before freeing pgtables
		 */
		anon_vma_unlink(vma);
		unlink_file_vma(vma);

		if (is_vm_hugetlb_page(vma)) {
			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		} else {
			/*
			 * Optimization: gather nearby vmas into one call down
			 */
			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
			       && !is_vm_hugetlb_page(next)) {
				vma = next;
				next = vma->vm_next;
				anon_vma_unlink(vma);
				unlink_file_vma(vma);
			}
			free_pgd_range(tlb, addr, vma->vm_end,
				floor, next? next->vm_start: ceiling);
		}
		vma = next;
	}
}
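/*
 * Worked example (illustrative note, not from the original source): with
 * 4K pages and a 2M PMD_SIZE, suppose the range addr=0xb00000, end=0xc00000
 * is being freed while a lower neighbour extends up to floor=0xa80000 and
 * an upper neighbour starts at ceiling=0xe80000.  Then "addr &= PMD_MASK"
 * gives 0xa00000, which is below floor, so addr is bumped to 0xc00000:
 * the only PMD-sized slot the range touches (0xa00000-0xc00000) still
 * carries the lower neighbour's ptes and must survive.  Since addr is now
 * greater than end - 1, free_pgd_range() returns without freeing any
 * page-table pages, exactly as intended.
 */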
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
{
	pgtable_t new = pte_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	/*
	 * Ensure all pte setup (eg. pte page lock and page clearing) are
	 * visible before the pte is made visible to other CPUs by being
	 * put into page tables.
	 *
	 * The other side of the story is the pointer chasing in the page
	 * table walking code (when walking the page table without locking;
	 * ie. most of the time). Fortunately, these data accesses consist
	 * of a chain of data-dependent loads, meaning most CPUs (alpha
	 * being the notable exception) will already guarantee loads are
	 * seen in-order. See the alpha page table accessors for the
	 * smp_read_barrier_depends() barriers in page table walking code.
	 */
	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

	spin_lock(&mm->page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		mm->nr_ptes++;
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	if (new)
		pte_free(mm, new);
	return 0;
}

int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
{
	pte_t *new = pte_alloc_one_kernel(&init_mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&init_mm.page_table_lock);
	if (!pmd_present(*pmd)) {	/* Has another populated it ? */
		pmd_populate_kernel(&init_mm, pmd, new);
		new = NULL;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (new)
		pte_free_kernel(&init_mm, new);
	return 0;
}

static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
{
	if (file_rss)
		add_mm_counter(mm, file_rss, file_rss);
	if (anon_rss)
		add_mm_counter(mm, anon_rss, anon_rss);
}

/*
 * This function is called to print an error when a bad pte
 * is found. For example, we might have a PFN-mapped pte in
 * a region that doesn't allow it.
 *
 * The calling function must still handle the error.
 */
static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
			  unsigned long vaddr)
{
	printk(KERN_ERR "Bad pte = %08llx, process = %s, "
			"vm_flags = %lx, vaddr = %lx\n",
		(long long)pte_val(pte),
		(vma->vm_mm == current->mm ? current->comm : "???"),
		vma->vm_flags, vaddr);
	dump_stack();
}

static inline int is_cow_mapping(unsigned int flags)
{
	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}
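/*
 * Illustrative examples (not from the original source) of the flag test
 * above: a MAP_PRIVATE mapping carries VM_MAYWRITE but not VM_SHARED,
 * so (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE holds and it is
 * treated as COW even while currently mapped read-only.  A MAP_SHARED
 * PROT_WRITE mapping carries both bits, so the equality fails and it is
 * never COW.
 */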
/*
 * vm_normal_page -- This function gets the "struct page" associated with a pte.
 *
 * "Special" mappings do not wish to be associated with a "struct page" (either
 * it doesn't exist, or it exists but they don't want to touch it). In this
 * case, NULL is returned here. "Normal" mappings do have a struct page.
 *
 * There are 2 broad cases. Firstly, an architecture may define a pte_special()
 * pte bit, in which case this function is trivial. Secondly, an architecture
 * may not have a spare pte bit, which requires a more complicated scheme,
 * described below.
 *
 * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
 * special mapping (even if there are underlying and valid "struct pages").
 * COWed pages of a VM_PFNMAP are always normal.
 *
 * The way we recognize COWed pages within VM_PFNMAP mappings is through the
 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
 * set, and the vm_pgoff will point to the first PFN mapped: thus every special
 * mapping will always honor the rule
 *
 *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
 *
 * And for normal mappings this is false.
 *
 * This restricts such mappings to be a linear translation from virtual address
 * to pfn. To get around this restriction, we allow arbitrary mappings so long
 * as the vma is not a COW mapping; in that case, we know that all ptes are
 * special (because none can have been COWed).
 *
 *
 * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
 *
 * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
 * page" backing, however the difference is that _all_ pages with a struct
 * page (that is, those where pfn_valid is true) are refcounted and considered
 * normal pages by the VM. The disadvantage is that pages are refcounted
 * (which can be slower and simply not an option for some PFNMAP users). The
 * advantage is that we don't have to follow the strict linearity rule of
 * PFNMAP mappings in order to support COWable mappings.
 *
 */
#ifdef __HAVE_ARCH_PTE_SPECIAL
# define HAVE_PTE_SPECIAL 1
#else
# define HAVE_PTE_SPECIAL 0
#endif
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
				pte_t pte)
{
	unsigned long pfn;

	if (HAVE_PTE_SPECIAL) {
		if (likely(!pte_special(pte))) {
			VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
			return pte_page(pte);
		}
		VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
		return NULL;
	}

	/* !HAVE_PTE_SPECIAL case follows: */

	pfn = pte_pfn(pte);

	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
		if (vma->vm_flags & VM_MIXEDMAP) {
			if (!pfn_valid(pfn))
				return NULL;
			goto out;
		} else {
			unsigned long off;
			off = (addr - vma->vm_start) >> PAGE_SHIFT;
			if (pfn == vma->vm_pgoff + off)
				return NULL;
			if (!is_cow_mapping(vma->vm_flags))
				return NULL;
		}
	}

	VM_BUG_ON(!pfn_valid(pfn));

	/*
	 * NOTE! We still have PageReserved() pages in the page tables.
	 *
	 * eg. VDSO mappings can cause them to exist.
	 */
out:
	return pfn_to_page(pfn);
}
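/*
 * Worked example (illustrative, not from the original source): suppose
 * remap_pfn_range() mapped pfns 0x800-0x8ff at vm_start, so
 * vma->vm_pgoff == 0x800.  For addr = vm_start + 3 * PAGE_SIZE, a
 * still-special pte holds pfn 0x803 == vm_pgoff + 3, so vm_normal_page()
 * returns NULL.  If that page has since been COWed, the pte now points
 * at an ordinary anonymous pfn that breaks the linear rule, and the
 * function falls through to pfn_to_page().
 */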
/*
 * copy one vm_area from one task to the other. Assumes the page tables
 * already present in the new task to be cleared in the whole range
 * covered by this vma.
 */

static inline void
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
		unsigned long addr, int *rss)
{
	unsigned long vm_flags = vma->vm_flags;
	pte_t pte = *src_pte;
	struct page *page;

	/* pte contains position in swap or file, so copy. */
	if (unlikely(!pte_present(pte))) {
		if (!pte_file(pte)) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			swap_duplicate(entry);
			/* make sure dst_mm is on swapoff's mmlist. */
			if (unlikely(list_empty(&dst_mm->mmlist))) {
				spin_lock(&mmlist_lock);
				if (list_empty(&dst_mm->mmlist))
					list_add(&dst_mm->mmlist,
						 &src_mm->mmlist);
				spin_unlock(&mmlist_lock);
			}
			if (is_write_migration_entry(entry) &&
					is_cow_mapping(vm_flags)) {
				/*
				 * COW mappings require pages in both parent
				 * and child to be set to read.
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
		goto out_set_pte;
	}

	/*
	 * If it's a COW mapping, write protect it both
	 * in the parent and the child
	 */
	if (is_cow_mapping(vm_flags)) {
		ptep_set_wrprotect(src_mm, addr, src_pte);
		pte = pte_wrprotect(pte);
	}

	/*
	 * If it's a shared mapping, mark it clean in
	 * the child
	 */
	if (vm_flags & VM_SHARED)
		pte = pte_mkclean(pte);
	pte = pte_mkold(pte);

	page = vm_normal_page(vma, addr, pte);
	if (page) {
		get_page(page);
		page_dup_rmap(page, vma, addr);
		rss[!!PageAnon(page)]++;
	}

out_set_pte:
	set_pte_at(dst_mm, addr, dst_pte, pte);
}

static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pte_t *src_pte, *dst_pte;
	spinlock_t *src_ptl, *dst_ptl;
	int progress = 0;
	int rss[2];

again:
	rss[1] = rss[0] = 0;
	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
	if (!dst_pte)
		return -ENOMEM;
	src_pte = pte_offset_map_nested(src_pmd, addr);
	src_ptl = pte_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
	arch_enter_lazy_mmu_mode();

	do {
		/*
		 * We are holding two locks at this point - either of them
		 * could generate latencies in another task on another CPU.
		 */
		if (progress >= 32) {
			progress = 0;
			if (need_resched() ||
			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
				break;
		}
		if (pte_none(*src_pte)) {
			progress++;
			continue;
		}
		copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
		progress += 8;
	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

	arch_leave_lazy_mmu_mode();
	spin_unlock(src_ptl);
	pte_unmap_nested(src_pte - 1);
	add_mm_rss(dst_mm, rss[0], rss[1]);
	pte_unmap_unlock(dst_pte - 1, dst_ptl);
	cond_resched();
	if (addr != end)
		goto again;
	return 0;
}

static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pmd_t *src_pmd, *dst_pmd;
	unsigned long next;

	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
	if (!dst_pmd)
		return -ENOMEM;
	src_pmd = pmd_offset(src_pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(src_pmd))
			continue;
		if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
	return 0;
}

static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
		unsigned long addr, unsigned long end)
{
	pud_t *src_pud, *dst_pud;
	unsigned long next;

	dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
	if (!dst_pud)
		return -ENOMEM;
	src_pud = pud_offset(src_pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(src_pud))
			continue;
		if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
						vma, addr, next))
			return -ENOMEM;
	} while (dst_pud++, src_pud++, addr = next, addr != end);
	return 0;
}
int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		struct vm_area_struct *vma)
{
	pgd_t *src_pgd, *dst_pgd;
	unsigned long next;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;
	int ret;

	/*
	 * Don't copy ptes where a page fault will fill them correctly.
	 * Fork becomes much lighter when there are big shared or private
	 * readonly mappings. The tradeoff is that copy_page_range is more
	 * efficient than faulting.
	 */
	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
		if (!vma->anon_vma)
			return 0;
	}

	if (is_vm_hugetlb_page(vma))
		return copy_hugetlb_page_range(dst_mm, src_mm, vma);

	/*
	 * We need to invalidate the secondary MMU mappings only when
	 * there could be a permission downgrade on the ptes of the
	 * parent mm. And a permission downgrade will only happen if
	 * is_cow_mapping() returns true.
	 */
	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_start(src_mm, addr, end);

	ret = 0;
	dst_pgd = pgd_offset(dst_mm, addr);
	src_pgd = pgd_offset(src_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(src_pgd))
			continue;
		if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
					    vma, addr, next))) {
			ret = -ENOMEM;
			break;
		}
	} while (dst_pgd++, src_pgd++, addr = next, addr != end);

	if (is_cow_mapping(vma->vm_flags))
		mmu_notifier_invalidate_range_end(src_mm,
						  vma->vm_start, end);
	return ret;
}
static unsigned long zap_pte_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	struct mm_struct *mm = tlb->mm;
	pte_t *pte;
	spinlock_t *ptl;
	int file_rss = 0;
	int anon_rss = 0;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		pte_t ptent = *pte;
		if (pte_none(ptent)) {
			(*zap_work)--;
			continue;
		}

		(*zap_work) -= PAGE_SIZE;

		if (pte_present(ptent)) {
			struct page *page;

			page = vm_normal_page(vma, addr, ptent);
			if (unlikely(details) && page) {
				/*
				 * unmap_shared_mapping_pages() wants to
				 * invalidate cache without truncating:
				 * unmap shared but keep private pages.
				 */
				if (details->check_mapping &&
				    details->check_mapping != page->mapping)
					continue;
				/*
				 * Each page->index must be checked when
				 * invalidating or truncating nonlinear.
				 */
				if (details->nonlinear_vma &&
				    (page->index < details->first_index ||
				     page->index > details->last_index))
					continue;
			}
			ptent = ptep_get_and_clear_full(mm, addr, pte,
							tlb->fullmm);
			tlb_remove_tlb_entry(tlb, pte, addr);
			if (unlikely(!page))
				continue;
			if (unlikely(details) && details->nonlinear_vma
			    && linear_page_index(details->nonlinear_vma,
						addr) != page->index)
				set_pte_at(mm, addr, pte,
					   pgoff_to_pte(page->index));
			if (PageAnon(page))
				anon_rss--;
			else {
				if (pte_dirty(ptent))
					set_page_dirty(page);
				if (pte_young(ptent))
					SetPageReferenced(page);
				file_rss--;
			}
			page_remove_rmap(page, vma);
			tlb_remove_page(tlb, page);
			continue;
		}
		/*
		 * If details->check_mapping, we leave swap entries;
		 * if details->nonlinear_vma, we leave file entries.
		 */
		if (unlikely(details))
			continue;
		if (!pte_file(ptent))
			free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
	} while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));

	add_mm_rss(mm, file_rss, anon_rss);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	return addr;
}

static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pud_t *pud,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pte_range(tlb, vma, pmd, addr, next,
						zap_work, details);
	} while (pmd++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pmd_range(tlb, vma, pud, addr, next,
						zap_work, details);
	} while (pud++, addr = next, (addr != end && *zap_work > 0));

	return addr;
}

static unsigned long unmap_page_range(struct mmu_gather *tlb,
				struct vm_area_struct *vma,
				unsigned long addr, unsigned long end,
				long *zap_work, struct zap_details *details)
{
	pgd_t *pgd;
	unsigned long next;

	if (details && !details->check_mapping && !details->nonlinear_vma)
		details = NULL;

	BUG_ON(addr >= end);
	tlb_start_vma(tlb, vma);
	pgd = pgd_offset(vma->vm_mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			(*zap_work)--;
			continue;
		}
		next = zap_pud_range(tlb, vma, pgd, addr, next,
						zap_work, details);
	} while (pgd++, addr = next, (addr != end && *zap_work > 0));
	tlb_end_vma(tlb, vma);

	return addr;
}

#ifdef CONFIG_PREEMPT
# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
#else
/* No preempt: go for improved straight-line efficiency */
# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
#endif
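/*
 * Illustrative arithmetic (not from the original source): with 4K pages,
 * ZAP_BLOCK_SIZE is 32K per batch under CONFIG_PREEMPT (frequent lock
 * break-out, low latency) and 4M otherwise (longer batches, better
 * straight-line throughput).  zap_pte_range() charges PAGE_SIZE of
 * zap_work per in-use pte, so a batch covers at most 8 or 1024 mapped
 * pages respectively before unmap_vmas() considers rescheduling.
 */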
/**
 * unmap_vmas - unmap a range of memory covered by a list of vma's
 * @tlbp: address of the caller's struct mmu_gather
 * @vma: the starting vma
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Returns the end address of the unmapping (restart addr if interrupted).
 *
 * Unmap all pages in the vma list.
 *
 * We aim to not hold locks for too long (for scheduling latency reasons).
 * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
 * return the ending mmu_gather to the caller.
 *
 * Only addresses between `start' and `end' will be unmapped.
 *
 * The VMA list must be sorted in ascending virtual address order.
 *
 * unmap_vmas() assumes that the caller will flush the whole unmapped address
 * range after unmap_vmas() returns.  So the only responsibility here is to
 * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
 * drops the lock and schedules.
 */
unsigned long unmap_vmas(struct mmu_gather **tlbp,
		struct vm_area_struct *vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *details)
{
	long zap_work = ZAP_BLOCK_SIZE;
	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
	int tlb_start_valid = 0;
	unsigned long start = start_addr;
	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
	int fullmm = (*tlbp)->fullmm;
	struct mm_struct *mm = vma->vm_mm;

	mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
		unsigned long end;

		start = max(vma->vm_start, start_addr);
		if (start >= vma->vm_end)
			continue;
		end = min(vma->vm_end, end_addr);
		if (end <= vma->vm_start)
			continue;

		if (vma->vm_flags & VM_ACCOUNT)
			*nr_accounted += (end - start) >> PAGE_SHIFT;

		while (start != end) {
			if (!tlb_start_valid) {
				tlb_start = start;
				tlb_start_valid = 1;
			}

			if (unlikely(is_vm_hugetlb_page(vma))) {
				/*
				 * It is undesirable to test vma->vm_file as it
				 * should be non-null for valid hugetlb area.
				 * However, vm_file will be NULL in the error
				 * cleanup path of do_mmap_pgoff. When
				 * hugetlbfs ->mmap method fails,
				 * do_mmap_pgoff() nullifies vma->vm_file
				 * before calling this function to clean up.
				 * Since no pte has actually been setup, it is
				 * safe to do nothing in this case.
				 */
				if (vma->vm_file) {
					unmap_hugepage_range(vma, start, end, NULL);
					zap_work -= (end - start) /
					pages_per_huge_page(hstate_vma(vma));
				}

				start = end;
			} else
				start = unmap_page_range(*tlbp, vma,
						start, end, &zap_work, details);

			if (zap_work > 0) {
				BUG_ON(start != end);
				break;
			}

			tlb_finish_mmu(*tlbp, tlb_start, start);

			if (need_resched() ||
				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
				if (i_mmap_lock) {
					*tlbp = NULL;
					goto out;
				}
				cond_resched();
			}

			*tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
			tlb_start_valid = 0;
			zap_work = ZAP_BLOCK_SIZE;
		}
	}
out:
	mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
	return start;	/* which is now the end (or restart) address */
}

/**
 * zap_page_range - remove user pages in a given range
 * @vma: vm_area_struct holding the applicable pages
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 * @details: details of nonlinear truncation or shared cache invalidation
 */
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *details)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_gather *tlb;
	unsigned long end = address + size;
	unsigned long nr_accounted = 0;

	lru_add_drain();
	tlb = tlb_gather_mmu(mm, 0);
	update_hiwater_rss(mm);
	end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
	if (tlb)
		tlb_finish_mmu(tlb, address, end);
	return end;
}

/**
 * zap_vma_ptes - remove ptes mapping the vma
 * @vma: vm_area_struct holding ptes to be zapped
 * @address: starting address of pages to zap
 * @size: number of bytes to zap
 *
 * This function only unmaps ptes assigned to VM_PFNMAP vmas.
 *
 * The entire address range must be fully contained within the vma.
 *
 * Returns 0 if successful.
 */
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size)
{
	if (address < vma->vm_start || address + size > vma->vm_end ||
			!(vma->vm_flags & VM_PFNMAP))
		return -1;
	zap_page_range(vma, address, size, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);
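/*
 * Usage sketch (illustrative, not part of this file): a driver that
 * earlier populated a VM_PFNMAP vma, e.g. with remap_pfn_range(), can
 * revoke the mapping when the device goes away.  mydev and its fields
 * are hypothetical:
 *
 *	static void mydev_revoke_mapping(struct my_device *mydev)
 *	{
 *		struct vm_area_struct *vma = mydev->vma;
 *
 *		if (zap_vma_ptes(vma, vma->vm_start,
 *				 vma->vm_end - vma->vm_start))
 *			dev_warn(mydev->dev, "could not zap ptes\n");
 *	}
 */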
/*
 * Do a quick page-table lookup for a single page.
 */
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		goto out;
	}

	page = NULL;
	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		goto no_page_table;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		goto no_page_table;
	if (pud_huge(*pud)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pud_bad(*pud)))
		goto no_page_table;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		goto no_page_table;
	if (pmd_huge(*pmd)) {
		BUG_ON(flags & FOLL_GET);
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		goto out;
	}
	if (unlikely(pmd_bad(*pmd)))
		goto no_page_table;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

	pte = *ptep;
	if (!pte_present(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		goto unlock;
	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page))
		goto bad_page;

	if (flags & FOLL_GET)
		get_page(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		mark_page_accessed(page);
	}
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return page;

bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return page;
	/* Fall through to ZERO_PAGE handling */
no_page_table:
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate page tables.
	 */
	if (flags & FOLL_ANON) {
		page = ZERO_PAGE(0);
		if (flags & FOLL_GET)
			get_page(page);
		BUG_ON(flags & FOLL_WRITE);
	}
	return page;
}

/* Can we do the FOLL_ANON optimization? */
static inline int use_zero_page(struct vm_area_struct *vma)
{
	/*
	 * We don't want to optimize FOLL_ANON for make_pages_present()
	 * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
	 * we want to get the page from the page tables to make sure
	 * that we serialize and update with any other user of that
	 * mapping.
	 */
	if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
		return 0;
	/*
	 * And if we have a fault routine, it's not an anonymous region.
	 */
	return !vma->vm_ops || !vma->vm_ops->fault;
}
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, int len, int write, int force,
		struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	unsigned int vm_flags;

	if (len <= 0)
		return 0;
	/*
	 * Require read or write permissions.
	 * If 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;
		unsigned int foll_flags;

		vma = find_extend_vma(mm, start);
		if (!vma && in_gate_area(tsk, start)) {
			unsigned long pg = start & PAGE_MASK;
			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;
			pte_t *pte;
			if (write) /* user gate pages are read-only */
				return i ? : -EFAULT;
			if (pg > TASK_SIZE)
				pgd = pgd_offset_k(pg);
			else
				pgd = pgd_offset_gate(mm, pg);
			BUG_ON(pgd_none(*pgd));
			pud = pud_offset(pgd, pg);
			BUG_ON(pud_none(*pud));
			pmd = pmd_offset(pud, pg);
			if (pmd_none(*pmd))
				return i ? : -EFAULT;
			pte = pte_offset_map(pmd, pg);
			if (pte_none(*pte)) {
				pte_unmap(pte);
				return i ? : -EFAULT;
			}
			if (pages) {
				struct page *page = vm_normal_page(gate_vma, start, *pte);
				pages[i] = page;
				if (page)
					get_page(page);
			}
			pte_unmap(pte);
			if (vmas)
				vmas[i] = gate_vma;
			i++;
			start += PAGE_SIZE;
			len--;
			continue;
		}

		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
				|| !(vm_flags & vma->vm_flags))
			return i ? : -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &len, i, write);
			continue;
		}

		foll_flags = FOLL_TOUCH;
		if (pages)
			foll_flags |= FOLL_GET;
		if (!write && use_zero_page(vma))
			foll_flags |= FOLL_ANON;

		do {
			struct page *page;

			/*
			 * If tsk is ooming, cut off its access to large memory
			 * allocations. It has a pending SIGKILL, but it can't
			 * be processed until returning to user space.
			 */
			if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
				return i ? i : -ENOMEM;

			if (write)
				foll_flags |= FOLL_WRITE;

			cond_resched();
			while (!(page = follow_page(vma, start, foll_flags))) {
				int ret;
				ret = handle_mm_fault(mm, vma, start,
						foll_flags & FOLL_WRITE);
				if (ret & VM_FAULT_ERROR) {
					if (ret & VM_FAULT_OOM)
						return i ? i : -ENOMEM;
					else if (ret & VM_FAULT_SIGBUS)
						return i ? i : -EFAULT;
					BUG();
				}
				if (ret & VM_FAULT_MAJOR)
					tsk->maj_flt++;
				else
					tsk->min_flt++;

				/*
				 * The VM_FAULT_WRITE bit tells us that
				 * do_wp_page has broken COW when necessary,
				 * even if maybe_mkwrite decided not to set
				 * pte_write. We can thus safely do subsequent
				 * page lookups as if they were reads.
				 */
				if (ret & VM_FAULT_WRITE)
					foll_flags &= ~FOLL_WRITE;

				cond_resched();
			}
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;

				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			len--;
		} while (len && start < vma->vm_end);
	} while (len);
	return i;
}
EXPORT_SYMBOL(get_user_pages);

pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
			spinlock_t **ptl)
{
	pgd_t * pgd = pgd_offset(mm, addr);
	pud_t * pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd_t * pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			return pte_alloc_map_lock(mm, pmd, addr, ptl);
	}
	return NULL;
}
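/*
 * Usage sketch (illustrative, not part of this file): the classic
 * pattern of this era for pinning user pages, e.g. for DMA.  nr_pages,
 * uaddr and the pages array are the caller's; error handling is
 * abbreviated:
 *
 *	down_read(&current->mm->mmap_sem);
 *	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
 *			     nr_pages, 1, 0, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *
 *	... do the I/O ...
 *
 *	for (j = 0; j < ret; j++) {
 *		set_page_dirty_lock(pages[j]);
 *		page_cache_release(pages[j]);
 *	}
 */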
/*
 * This is the old fallback for page remapping.
 *
 * For historical reasons, it only allows reserved pages. Only
 * old drivers should use this, and they needed to mark their
 * pages reserved for the old functions anyway.
 */
static int insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte;
	spinlock_t *ptl;

	retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
	if (retval)
		goto out;

	retval = -EINVAL;
	if (PageAnon(page))
		goto out_uncharge;
	retval = -ENOMEM;
	flush_dcache_page(page);
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out_uncharge;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	get_page(page);
	inc_mm_counter(mm, file_rss);
	page_add_file_rmap(page);
	set_pte_at(mm, addr, pte, mk_pte(page, prot));

	retval = 0;
	pte_unmap_unlock(pte, ptl);
	return retval;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out_uncharge:
	mem_cgroup_uncharge_page(page);
out:
	return retval;
}

/**
 * vm_insert_page - insert single page into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @page: source kernel page
 *
 * This allows drivers to insert individual pages they've allocated
 * into a user vma.
 *
 * The page has to be a nice clean _individual_ kernel allocation.
 * If you allocate a compound page, you need to have marked it as
 * such (__GFP_COMP), or manually just split the page up yourself
 * (see split_page()).
 *
 * NOTE! Traditionally this was done with "remap_pfn_range()" which
 * took an arbitrary page protection parameter. This doesn't allow
 * that. Your vma protection will have to be set up correctly, which
 * means that if you want a shared writable mapping, you'd better
 * ask for a shared writable mapping!
 *
 * The page does not need to be reserved.
 */
int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
			struct page *page)
{
	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	if (!page_count(page))
		return -EINVAL;
	vma->vm_flags |= VM_INSERTPAGE;
	return insert_page(vma, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);

static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn, pgprot_t prot)
{
	struct mm_struct *mm = vma->vm_mm;
	int retval;
	pte_t *pte, entry;
	spinlock_t *ptl;

	retval = -ENOMEM;
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto out;
	retval = -EBUSY;
	if (!pte_none(*pte))
		goto out_unlock;

	/* Ok, finally just insert the thing.. */
	entry = pte_mkspecial(pfn_pte(pfn, prot));
	set_pte_at(mm, addr, pte, entry);
	update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */

	retval = 0;
out_unlock:
	pte_unmap_unlock(pte, ptl);
out:
	return retval;
}

/**
 * vm_insert_pfn - insert single pfn into user vma
 * @vma: user vma to map to
 * @addr: target user address of this page
 * @pfn: source kernel pfn
 *
 * Similar to vm_insert_page, this allows drivers to insert individual pages
 * they've allocated into a user vma. Same comments apply.
 *
 * This function should only be called from a vm_ops->fault handler, and
 * in that case the handler should return NULL.
 *
 * vma cannot be a COW mapping.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 */
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	/*
	 * Technically, architectures with pte_special can avoid all these
	 * restrictions (same for remap_pfn_range).  However we would like
	 * consistency in testing and feature parity among all, so we should
	 * try to keep these invariants in place for everybody.
	 */
	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
						(VM_PFNMAP|VM_MIXEDMAP));
	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_pfn);

int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
			unsigned long pfn)
{
	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));

	if (addr < vma->vm_start || addr >= vma->vm_end)
		return -EFAULT;

	/*
	 * If we don't have pte special, then we have to use the pfn_valid()
	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
	 * refcount the page if pfn_valid is true (hence insert_page rather
	 * than insert_pfn).
	 */
	if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
		struct page *page;

		page = pfn_to_page(pfn);
		return insert_page(vma, addr, page, vma->vm_page_prot);
	}
	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_mixed);
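/*
 * Usage sketch (illustrative, not part of this file): a ->fault handler
 * for a VM_PFNMAP vma might resolve the faulting address to a device
 * pfn and install it directly; in this era such handlers commonly return
 * VM_FAULT_NOPAGE since the pte is already installed.  mydev_addr_to_pfn()
 * is hypothetical:
 *
 *	static int mydev_fault(struct vm_area_struct *vma,
 *			       struct vm_fault *vmf)
 *	{
 *		unsigned long addr = (unsigned long)vmf->virtual_address;
 *		unsigned long pfn = mydev_addr_to_pfn(vma, addr);
 *
 *		if (vm_insert_pfn(vma, addr, pfn))
 *			return VM_FAULT_SIGBUS;
 *		return VM_FAULT_NOPAGE;
 *	}
 */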
/*
 * Maps a range of physical memory into the requested pages. The old
 * mappings are removed. Any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access").
 */
static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pte_t *pte;
	spinlock_t *ptl;

	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;
	arch_enter_lazy_mmu_mode();
	do {
		BUG_ON(!pte_none(*pte));
		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pmd_t *pmd;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (remap_pte_range(mm, pmd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
			unsigned long addr, unsigned long end,
			unsigned long pfn, pgprot_t prot)
{
	pud_t *pud;
	unsigned long next;

	pfn -= addr >> PAGE_SHIFT;
	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (remap_pmd_range(mm, pud, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/**
 * remap_pfn_range - remap kernel memory to userspace
 * @vma: user vma to map to
 * @addr: target user address to start at
 * @pfn: page frame number of kernel physical memory
 * @size: size of map area
 * @prot: page protection flags for this mapping
 *
 * Note: this is only safe if the mm semaphore is held when called.
 */
int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
		    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long end = addr + PAGE_ALIGN(size);
	struct mm_struct *mm = vma->vm_mm;
	int err;

	/*
	 * Physically remapped pages are special. Tell the
	 * rest of the world about it:
	 *   VM_IO tells people not to look at these pages
	 *	(accesses can have side effects).
	 *   VM_RESERVED is specified all over the place, because
	 *	in 2.4 it kept swapout's vma scan off this vma; but
	 *	in 2.6 the LRU scan won't even find its pages, so this
	 *	flag means no more than count its pages in reserved_vm,
	 *	and omit it from core dump, even when VM_IO turned off.
	 *   VM_PFNMAP tells the core MM that the base pages are just
	 *	raw PFN mappings, and do not have a "struct page"
	 *	associated with them.
	 *
	 * There's a horrible special case to handle copy-on-write
	 * behaviour that some programs depend on. We mark the "original"
	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
	 */
	if (is_cow_mapping(vma->vm_flags)) {
		if (addr != vma->vm_start || end != vma->vm_end)
			return -EINVAL;
		vma->vm_pgoff = pfn;
	}

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	BUG_ON(addr >= end);
	pfn -= addr >> PAGE_SHIFT;
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_pud_range(mm, pgd, addr, next,
				pfn + (addr >> PAGE_SHIFT), prot);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	return err;
}
EXPORT_SYMBOL(remap_pfn_range);
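/*
 * Usage sketch (illustrative, not part of this file): the typical way a
 * character driver's ->mmap method exposes a physical region to
 * userspace.  MYDEV_PHYS_BASE is a hypothetical device address:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       MYDEV_PHYS_BASE >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 *
 * The mmap_sem is already held when ->mmap is called, which satisfies
 * the locking note above.
 */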
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr, unsigned long end,
				pte_fn_t fn, void *data)
{
	pte_t *pte;
	int err;
	pgtable_t token;
	spinlock_t *uninitialized_var(ptl);

	pte = (mm == &init_mm) ?
		pte_alloc_kernel(pmd, addr) :
		pte_alloc_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -ENOMEM;

	BUG_ON(pmd_huge(*pmd));

	token = pmd_pgtable(*pmd);

	do {
		err = fn(pte, token, addr, data);
		if (err)
			break;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	if (mm != &init_mm)
		pte_unmap_unlock(pte-1, ptl);
	return err;
}

static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end,
				pte_fn_t fn, void *data)
{
	pmd_t *pmd;
	unsigned long next;
	int err;

	BUG_ON(pud_huge(*pud));

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);
	return err;
}

static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
				unsigned long addr, unsigned long end,
				pte_fn_t fn, void *data)
{
	pud_t *pud;
	unsigned long next;
	int err;

	pud = pud_alloc(mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);
	return err;
}

/*
 * Scan a region of virtual memory, filling in page tables as necessary
 * and calling a provided function on each leaf page table.
 */
int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
			unsigned long size, pte_fn_t fn, void *data)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr, end = addr + size;
	int err;

	BUG_ON(addr >= end);
	mmu_notifier_invalidate_range_start(mm, start, end);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	mmu_notifier_invalidate_range_end(mm, start, end);
	return err;
}
EXPORT_SYMBOL_GPL(apply_to_page_range);
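/*
 * Usage sketch (illustrative, not part of this file): a pte_fn_t
 * callback receives each pte in turn, here just counting the ptes that
 * are already present.  All names below are hypothetical:
 *
 *	static int count_present(pte_t *pte, pgtable_t token,
 *				 unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	apply_to_page_range(mm, start, len, count_present, &count);
 */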
/*
 * handle_pte_fault chooses page fault handler according to an entry
 * which was read non-atomically. Before making any commitment, on
 * those architectures or configurations (e.g. i386 with PAE) which
 * might give a mix of unmatched parts, do_swap_page and do_file_page
 * must check under lock before unmapping the pte and proceeding
 * (but do_wp_page is only called after already making such a check;
 * and do_anonymous_page and do_no_page can safely check later on).
 */
static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
				pte_t *page_table, pte_t orig_pte)
{
	int same = 1;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
	if (sizeof(pte_t) > sizeof(unsigned long)) {
		spinlock_t *ptl = pte_lockptr(mm, pmd);
		spin_lock(ptl);
		same = pte_same(*page_table, orig_pte);
		spin_unlock(ptl);
	}
#endif
	pte_unmap(page_table);
	return same;
}

/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
 * servicing faults for write access. In the normal case, we do always
 * want pte_mkwrite. But get_user_pages can cause write faults for
 * mappings that do not have writing enabled, when used by
 * access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}

static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
	/*
	 * If the source page was a PFN mapping, we don't have
	 * a "struct page" for it. We do a best-effort copy by
	 * just copying from the original user address. If that
	 * fails, we just zero-fill it. Live with it.
	 */
	if (unlikely(!src)) {
		void *kaddr = kmap_atomic(dst, KM_USER0);
		void __user *uaddr = (void __user *)(va & PAGE_MASK);

		/*
		 * This really shouldn't fail, because the page is there
		 * in the page tables. But it might just be unreadable,
		 * in which case we just give up and fill the result with
		 * zeroes.
		 */
		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
			memset(kaddr, 0, PAGE_SIZE);
		kunmap_atomic(kaddr, KM_USER0);
		flush_dcache_page(dst);
	} else
		copy_user_highpage(dst, src, va, vma);
}

/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), with pte both mapped and locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		spinlock_t *ptl, pte_t orig_pte)
{
	struct page *old_page, *new_page;
	pte_t entry;
	int reuse = 0, ret = 0;
	int page_mkwrite = 0;
	struct page *dirty_page = NULL;

	old_page = vm_normal_page(vma, address, orig_pte);
	if (!old_page) {
		/*
		 * VM_MIXEDMAP !pfn_valid() case
		 *
		 * We should not cow pages in a shared writeable mapping.
		 * Just mark the pages writable as we can't do any dirty
		 * accounting on raw pfn maps.
		 */
		if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
				     (VM_WRITE|VM_SHARED))
			goto reuse;
		goto gotten;
	}

	/*
	 * Take out anonymous pages first, anonymous shared vmas are
	 * not dirty accountable.
	 */
	if (PageAnon(old_page)) {
		if (!TestSetPageLocked(old_page)) {
			reuse = can_share_swap_page(old_page);
			unlock_page(old_page);
		}
	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
					(VM_WRITE|VM_SHARED))) {
		/*
		 * Only catch write-faults on shared writable pages,
		 * read-only shared pages can get COWed by
		 * get_user_pages(.write=1, .force=1).
		 */
		if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
			/*
			 * Notify the address space that the page is about to
			 * become writable so that it can prohibit this or wait
			 * for the page to get into an appropriate state.
			 *
			 * We do this without the lock held, so that it can
			 * sleep if it needs to.
			 */
			page_cache_get(old_page);
			pte_unmap_unlock(page_table, ptl);

			if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
				goto unwritable_page;

			/*
			 * Since we dropped the lock we need to revalidate
			 * the PTE as someone else may have changed it.  If
			 * they did, we just return, as we can count on the
			 * MMU to tell us if they didn't also make it writable.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address,
							 &ptl);
			page_cache_release(old_page);
			if (!pte_same(*page_table, orig_pte))
				goto unlock;

			page_mkwrite = 1;
		}
		dirty_page = old_page;
		get_page(dirty_page);
		reuse = 1;
	}

	if (reuse) {
reuse:
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = pte_mkyoung(orig_pte);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		if (ptep_set_access_flags(vma, address, page_table, entry,1))
			update_mmu_cache(vma, address, entry);
		ret |= VM_FAULT_WRITE;
		goto unlock;
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	page_cache_get(old_page);
gotten:
	pte_unmap_unlock(page_table, ptl);

	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	VM_BUG_ON(old_page == ZERO_PAGE(0));
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (!new_page)
		goto oom;
	cow_user_page(new_page, old_page, address, vma);
	__SetPageUptodate(new_page);

	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
		goto oom_free_new;

	/*
	 * Re-check the pte - we dropped the lock
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (likely(pte_same(*page_table, orig_pte))) {
		if (old_page) {
			if (!PageAnon(old_page)) {
				dec_mm_counter(mm, file_rss);
				inc_mm_counter(mm, anon_rss);
			}
		} else
			inc_mm_counter(mm, anon_rss);
		flush_cache_page(vma, address, pte_pfn(orig_pte));
		entry = mk_pte(new_page, vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		/*
		 * Clear the pte entry and flush it first, before updating the
		 * pte with the new entry. This will avoid a race condition
		 * seen in the presence of one thread doing SMC and another
		 * thread doing COW.
		 */
		ptep_clear_flush_notify(vma, address, page_table);
		set_pte_at(mm, address, page_table, entry);
		update_mmu_cache(vma, address, entry);
		lru_cache_add_active(new_page);
		page_add_new_anon_rmap(new_page, vma, address);

		if (old_page) {
			/*
			 * Only after switching the pte to the new page may
			 * we remove the mapcount here. Otherwise another
			 * process may come and find the rmap count decremented
			 * before the pte is switched to the new page, and
			 * "reuse" the old page writing into it while our pte
			 * here still points into it and can be read by other
			 * threads.
			 *
			 * The critical issue is to order this
			 * page_remove_rmap with the ptep_clear_flush above.
			 * Those stores are ordered by (if nothing else,)
			 * the barrier present in the atomic_add_negative
			 * in page_remove_rmap.
			 *
			 * Then the TLB flush in ptep_clear_flush ensures that
			 * no process can access the old page before the
			 * decremented mapcount is visible. And the old page
			 * cannot be reused until after the decremented
			 * mapcount is visible. So transitively, TLBs to
			 * old page will be flushed before it can be reused.
			 */
			page_remove_rmap(old_page, vma);
		}

		/* Free the old page.. */
		new_page = old_page;
		ret |= VM_FAULT_WRITE;
	} else
		mem_cgroup_uncharge_page(new_page);

	if (new_page)
		page_cache_release(new_page);
	if (old_page)
		page_cache_release(old_page);
unlock:
	pte_unmap_unlock(page_table, ptl);
	if (dirty_page) {
		if (vma->vm_file)
			file_update_time(vma->vm_file);

		/*
		 * Yes, Virginia, this is actually required to prevent a race
		 * with clear_page_dirty_for_io() from clearing the page dirty
		 * bit after it clears all dirty ptes, but before a racing
		 * do_wp_page installs a dirty pte.
		 *
		 * do_no_page is protected similarly.
1943 */ 1944 wait_on_page_locked(dirty_page); 1945 set_page_dirty_balance(dirty_page, page_mkwrite); 1946 put_page(dirty_page); 1947 } 1948 return ret; 1949oom_free_new: 1950 page_cache_release(new_page); 1951oom: 1952 if (old_page) 1953 page_cache_release(old_page); 1954 return VM_FAULT_OOM; 1955 1956unwritable_page: 1957 page_cache_release(old_page); 1958 return VM_FAULT_SIGBUS; 1959} 1960 1961/* 1962 * Helper functions for unmap_mapping_range(). 1963 * 1964 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __ 1965 * 1966 * We have to restart searching the prio_tree whenever we drop the lock, 1967 * since the iterator is only valid while the lock is held, and anyway 1968 * a later vma might be split and reinserted earlier while lock dropped. 1969 * 1970 * The list of nonlinear vmas could be handled more efficiently, using 1971 * a placeholder, but handle it in the same way until a need is shown. 1972 * It is important to search the prio_tree before nonlinear list: a vma 1973 * may become nonlinear and be shifted from prio_tree to nonlinear list 1974 * while the lock is dropped; but never shifted from list to prio_tree. 1975 * 1976 * In order to make forward progress despite restarting the search, 1977 * vm_truncate_count is used to mark a vma as now dealt with, so we can 1978 * quickly skip it next time around. Since the prio_tree search only 1979 * shows us those vmas affected by unmapping the range in question, we 1980 * can't efficiently keep all vmas in step with mapping->truncate_count: 1981 * so instead reset them all whenever it wraps back to 0 (then go to 1). 1982 * mapping->truncate_count and vma->vm_truncate_count are protected by 1983 * i_mmap_lock. 1984 * 1985 * In order to make forward progress despite repeatedly restarting some 1986 * large vma, note the restart_addr from unmap_vmas when it breaks out: 1987 * and restart from that address when we reach that vma again. It might 1988 * have been split or merged, shrunk or extended, but never shifted: so 1989 * restart_addr remains valid so long as it remains in the vma's range. 1990 * unmap_mapping_range forces truncate_count to leap over page-aligned 1991 * values so we can save vma's restart_addr in its truncate_count field. 1992 */ 1993#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK)) 1994 1995static void reset_vma_truncate_counts(struct address_space *mapping) 1996{ 1997 struct vm_area_struct *vma; 1998 struct prio_tree_iter iter; 1999 2000 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX) 2001 vma->vm_truncate_count = 0; 2002 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 2003 vma->vm_truncate_count = 0; 2004} 2005 2006static int unmap_mapping_range_vma(struct vm_area_struct *vma, 2007 unsigned long start_addr, unsigned long end_addr, 2008 struct zap_details *details) 2009{ 2010 unsigned long restart_addr; 2011 int need_break; 2012 2013 /* 2014 * files that support invalidating or truncating portions of the 2015 * file from under mmaped areas must have their ->fault function 2016 * return a locked page (and set VM_FAULT_LOCKED in the return). 2017 * This provides synchronisation against concurrent unmapping here. 
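 *
 * (A concrete example of the vm_truncate_count encoding used below:
 * restart addresses are page aligned, truncate counts never are, so
 * with 4K pages is_restart_addr(0x2000) is true while
 * is_restart_addr(0x2001) is false; that is why unmap_mapping_range()
 * makes truncate_count skip page-aligned values.)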
2018 */ 2019 2020again: 2021 restart_addr = vma->vm_truncate_count; 2022 if (is_restart_addr(restart_addr) && start_addr < restart_addr) { 2023 start_addr = restart_addr; 2024 if (start_addr >= end_addr) { 2025 /* Top of vma has been split off since last time */ 2026 vma->vm_truncate_count = details->truncate_count; 2027 return 0; 2028 } 2029 } 2030 2031 restart_addr = zap_page_range(vma, start_addr, 2032 end_addr - start_addr, details); 2033 need_break = need_resched() || spin_needbreak(details->i_mmap_lock); 2034 2035 if (restart_addr >= end_addr) { 2036 /* We have now completed this vma: mark it so */ 2037 vma->vm_truncate_count = details->truncate_count; 2038 if (!need_break) 2039 return 0; 2040 } else { 2041 /* Note restart_addr in vma's truncate_count field */ 2042 vma->vm_truncate_count = restart_addr; 2043 if (!need_break) 2044 goto again; 2045 } 2046 2047 spin_unlock(details->i_mmap_lock); 2048 cond_resched(); 2049 spin_lock(details->i_mmap_lock); 2050 return -EINTR; 2051} 2052 2053static inline void unmap_mapping_range_tree(struct prio_tree_root *root, 2054 struct zap_details *details) 2055{ 2056 struct vm_area_struct *vma; 2057 struct prio_tree_iter iter; 2058 pgoff_t vba, vea, zba, zea; 2059 2060restart: 2061 vma_prio_tree_foreach(vma, &iter, root, 2062 details->first_index, details->last_index) { 2063 /* Skip quickly over those we have already dealt with */ 2064 if (vma->vm_truncate_count == details->truncate_count) 2065 continue; 2066 2067 vba = vma->vm_pgoff; 2068 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; 2069 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ 2070 zba = details->first_index; 2071 if (zba < vba) 2072 zba = vba; 2073 zea = details->last_index; 2074 if (zea > vea) 2075 zea = vea; 2076 2077 if (unmap_mapping_range_vma(vma, 2078 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 2079 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 2080 details) < 0) 2081 goto restart; 2082 } 2083} 2084 2085static inline void unmap_mapping_range_list(struct list_head *head, 2086 struct zap_details *details) 2087{ 2088 struct vm_area_struct *vma; 2089 2090 /* 2091 * In nonlinear VMAs there is no correspondence between virtual address 2092 * offset and file offset. So we must perform an exhaustive search 2093 * across *all* the pages in each nonlinear VMA, not just the pages 2094 * whose virtual address lies outside the file truncation point. 2095 */ 2096restart: 2097 list_for_each_entry(vma, head, shared.vm_set.list) { 2098 /* Skip quickly over those we have already dealt with */ 2099 if (vma->vm_truncate_count == details->truncate_count) 2100 continue; 2101 details->nonlinear_vma = vma; 2102 if (unmap_mapping_range_vma(vma, vma->vm_start, 2103 vma->vm_end, details) < 0) 2104 goto restart; 2105 } 2106} 2107 2108/** 2109 * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file. 2110 * @mapping: the address space containing mmaps to be unmapped. 2111 * @holebegin: byte in first page to unmap, relative to the start of 2112 * the underlying file. This will be rounded down to a PAGE_SIZE 2113 * boundary. Note that this is different from vmtruncate(), which 2114 * must keep the partial page. In contrast, we must get rid of 2115 * partial pages. 2116 * @holelen: size of prospective hole in bytes. This will be rounded 2117 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 2118 * end of the file. 
2119 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 2120 * but 0 when invalidating pagecache, don't throw away private data. 2121 */ 2122void unmap_mapping_range(struct address_space *mapping, 2123 loff_t const holebegin, loff_t const holelen, int even_cows) 2124{ 2125 struct zap_details details; 2126 pgoff_t hba = holebegin >> PAGE_SHIFT; 2127 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 2128 2129 /* Check for overflow. */ 2130 if (sizeof(holelen) > sizeof(hlen)) { 2131 long long holeend = 2132 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 2133 if (holeend & ~(long long)ULONG_MAX) 2134 hlen = ULONG_MAX - hba + 1; 2135 } 2136 2137 details.check_mapping = even_cows? NULL: mapping; 2138 details.nonlinear_vma = NULL; 2139 details.first_index = hba; 2140 details.last_index = hba + hlen - 1; 2141 if (details.last_index < details.first_index) 2142 details.last_index = ULONG_MAX; 2143 details.i_mmap_lock = &mapping->i_mmap_lock; 2144 2145 spin_lock(&mapping->i_mmap_lock); 2146 2147 /* Protect against endless unmapping loops */ 2148 mapping->truncate_count++; 2149 if (unlikely(is_restart_addr(mapping->truncate_count))) { 2150 if (mapping->truncate_count == 0) 2151 reset_vma_truncate_counts(mapping); 2152 mapping->truncate_count++; 2153 } 2154 details.truncate_count = mapping->truncate_count; 2155 2156 if (unlikely(!prio_tree_empty(&mapping->i_mmap))) 2157 unmap_mapping_range_tree(&mapping->i_mmap, &details); 2158 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 2159 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 2160 spin_unlock(&mapping->i_mmap_lock); 2161} 2162EXPORT_SYMBOL(unmap_mapping_range); 2163 2164/** 2165 * vmtruncate - unmap mappings "freed" by truncate() syscall 2166 * @inode: inode of the file used 2167 * @offset: file offset to start truncating 2168 * 2169 * NOTE! We have to be ready to update the memory sharing 2170 * between the file and the memory map for a potential last 2171 * incomplete page. Ugly, but necessary. 2172 */ 2173int vmtruncate(struct inode * inode, loff_t offset) 2174{ 2175 if (inode->i_size < offset) { 2176 unsigned long limit; 2177 2178 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 2179 if (limit != RLIM_INFINITY && offset > limit) 2180 goto out_sig; 2181 if (offset > inode->i_sb->s_maxbytes) 2182 goto out_big; 2183 i_size_write(inode, offset); 2184 } else { 2185 struct address_space *mapping = inode->i_mapping; 2186 2187 /* 2188 * truncation of in-use swapfiles is disallowed - it would 2189 * cause subsequent swapout to scribble on the now-freed 2190 * blocks. 2191 */ 2192 if (IS_SWAPFILE(inode)) 2193 return -ETXTBSY; 2194 i_size_write(inode, offset); 2195 2196 /* 2197 * unmap_mapping_range is called twice, first simply for 2198 * efficiency so that truncate_inode_pages does fewer 2199 * single-page unmaps. However after this first call, and 2200 * before truncate_inode_pages finishes, it is possible for 2201 * private pages to be COWed, which remain after 2202 * truncate_inode_pages finishes, hence the second 2203 * unmap_mapping_range call must be made for correctness. 
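 *
 * The "offset + PAGE_SIZE - 1" in the calls below makes the hole
 * start at the first page boundary at or above offset, because
 * unmap_mapping_range() rounds holebegin down to a page boundary.
 * A worked example, assuming 4K pages and offset 10000: holebegin
 * is 14095, which rounds down to 12288, so the partial page at
 * bytes 8192-12287 stays mapped and is left for the partial-page
 * handling instead.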
2204 */ 2205 unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); 2206 truncate_inode_pages(mapping, offset); 2207 unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); 2208 } 2209 2210 if (inode->i_op && inode->i_op->truncate) 2211 inode->i_op->truncate(inode); 2212 return 0; 2213 2214out_sig: 2215 send_sig(SIGXFSZ, current, 0); 2216out_big: 2217 return -EFBIG; 2218} 2219EXPORT_SYMBOL(vmtruncate); 2220 2221int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) 2222{ 2223 struct address_space *mapping = inode->i_mapping; 2224 2225 /* 2226 * If the underlying filesystem is not going to provide 2227 * a way to truncate a range of blocks (punch a hole) - 2228 * we should return failure right now. 2229 */ 2230 if (!inode->i_op || !inode->i_op->truncate_range) 2231 return -ENOSYS; 2232 2233 mutex_lock(&inode->i_mutex); 2234 down_write(&inode->i_alloc_sem); 2235 unmap_mapping_range(mapping, offset, (end - offset), 1); 2236 truncate_inode_pages_range(mapping, offset, end); 2237 unmap_mapping_range(mapping, offset, (end - offset), 1); 2238 inode->i_op->truncate_range(inode, offset, end); 2239 up_write(&inode->i_alloc_sem); 2240 mutex_unlock(&inode->i_mutex); 2241 2242 return 0; 2243} 2244 2245/* 2246 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2247 * but allow concurrent faults), and pte mapped but not yet locked. 2248 * We return with mmap_sem still held, but pte unmapped and unlocked. 2249 */ 2250static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, 2251 unsigned long address, pte_t *page_table, pmd_t *pmd, 2252 int write_access, pte_t orig_pte) 2253{ 2254 spinlock_t *ptl; 2255 struct page *page; 2256 swp_entry_t entry; 2257 pte_t pte; 2258 int ret = 0; 2259 2260 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2261 goto out; 2262 2263 entry = pte_to_swp_entry(orig_pte); 2264 if (is_migration_entry(entry)) { 2265 migration_entry_wait(mm, pmd, address); 2266 goto out; 2267 } 2268 delayacct_set_flag(DELAYACCT_PF_SWAPIN); 2269 page = lookup_swap_cache(entry); 2270 if (!page) { 2271 grab_swap_token(); /* Contend for token _before_ read-in */ 2272 page = swapin_readahead(entry, 2273 GFP_HIGHUSER_MOVABLE, vma, address); 2274 if (!page) { 2275 /* 2276 * Back out if somebody else faulted in this pte 2277 * while we released the pte lock. 2278 */ 2279 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2280 if (likely(pte_same(*page_table, orig_pte))) 2281 ret = VM_FAULT_OOM; 2282 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2283 goto unlock; 2284 } 2285 2286 /* Had to read the page from swap area: Major fault */ 2287 ret = VM_FAULT_MAJOR; 2288 count_vm_event(PGMAJFAULT); 2289 } 2290 2291 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2292 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2293 ret = VM_FAULT_OOM; 2294 goto out; 2295 } 2296 2297 mark_page_accessed(page); 2298 lock_page(page); 2299 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2300 2301 /* 2302 * Back out if somebody else already faulted in this pte. 2303 */ 2304 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2305 if (unlikely(!pte_same(*page_table, orig_pte))) 2306 goto out_nomap; 2307 2308 if (unlikely(!PageUptodate(page))) { 2309 ret = VM_FAULT_SIGBUS; 2310 goto out_nomap; 2311 } 2312 2313 /* The page isn't present yet, go ahead with the fault. 
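 *
 * (If this is a write fault and can_share_swap_page() says we are
 * the only user, the pte below is made writable and dirty right
 * away; otherwise it goes in read-only and the do_wp_page() call
 * further down breaks the sharing on demand.)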
*/ 2314 2315 inc_mm_counter(mm, anon_rss); 2316 pte = mk_pte(page, vma->vm_page_prot); 2317 if (write_access && can_share_swap_page(page)) { 2318 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 2319 write_access = 0; 2320 } 2321 2322 flush_icache_page(vma, page); 2323 set_pte_at(mm, address, page_table, pte); 2324 page_add_anon_rmap(page, vma, address); 2325 2326 swap_free(entry); 2327 if (vm_swap_full()) 2328 remove_exclusive_swap_page(page); 2329 unlock_page(page); 2330 2331 if (write_access) { 2332 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); 2333 if (ret & VM_FAULT_ERROR) 2334 ret &= VM_FAULT_ERROR; 2335 goto out; 2336 } 2337 2338 /* No need to invalidate - it was non-present before */ 2339 update_mmu_cache(vma, address, pte); 2340unlock: 2341 pte_unmap_unlock(page_table, ptl); 2342out: 2343 return ret; 2344out_nomap: 2345 mem_cgroup_uncharge_page(page); 2346 pte_unmap_unlock(page_table, ptl); 2347 unlock_page(page); 2348 page_cache_release(page); 2349 return ret; 2350} 2351 2352/* 2353 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2354 * but allow concurrent faults), and pte mapped but not yet locked. 2355 * We return with mmap_sem still held, but pte unmapped and unlocked. 2356 */ 2357static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 2358 unsigned long address, pte_t *page_table, pmd_t *pmd, 2359 int write_access) 2360{ 2361 struct page *page; 2362 spinlock_t *ptl; 2363 pte_t entry; 2364 2365 /* Allocate our own private page. */ 2366 pte_unmap(page_table); 2367 2368 if (unlikely(anon_vma_prepare(vma))) 2369 goto oom; 2370 page = alloc_zeroed_user_highpage_movable(vma, address); 2371 if (!page) 2372 goto oom; 2373 __SetPageUptodate(page); 2374 2375 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) 2376 goto oom_free_page; 2377 2378 entry = mk_pte(page, vma->vm_page_prot); 2379 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2380 2381 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2382 if (!pte_none(*page_table)) 2383 goto release; 2384 inc_mm_counter(mm, anon_rss); 2385 lru_cache_add_active(page); 2386 page_add_new_anon_rmap(page, vma, address); 2387 set_pte_at(mm, address, page_table, entry); 2388 2389 /* No need to invalidate - it was non-present before */ 2390 update_mmu_cache(vma, address, entry); 2391unlock: 2392 pte_unmap_unlock(page_table, ptl); 2393 return 0; 2394release: 2395 mem_cgroup_uncharge_page(page); 2396 page_cache_release(page); 2397 goto unlock; 2398oom_free_page: 2399 page_cache_release(page); 2400oom: 2401 return VM_FAULT_OOM; 2402} 2403 2404/* 2405 * __do_fault() tries to create a new page mapping. It aggressively 2406 * tries to share with existing pages, but makes a separate copy if 2407 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid 2408 * the next page fault. 2409 * 2410 * As this is called only for pages that do not currently exist, we 2411 * do not need to flush old virtual caches or the TLB. 2412 * 2413 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2414 * but allow concurrent faults), and pte neither mapped nor locked. 2415 * We return with mmap_sem still held, but pte unmapped and unlocked. 
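 *
 * A minimal ->fault() of the shape this code expects, returning the
 * page locked (an illustrative sketch only; foo_fault and its use of
 * find_or_create_page() are hypothetical, not an API required here):
 *
 *	static int foo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct page *page;
 *
 *		page = find_or_create_page(vma->vm_file->f_mapping,
 *				vmf->pgoff, GFP_KERNEL | __GFP_HIGHMEM);
 *		if (!page)
 *			return VM_FAULT_OOM;
 *		if (!PageUptodate(page)) {
 *			clear_highpage(page);
 *			SetPageUptodate(page);
 *		}
 *		vmf->page = page;
 *		return VM_FAULT_LOCKED;
 *	}
 *
 * find_or_create_page() hands back the page locked and referenced,
 * which matches the VM_FAULT_LOCKED contract checked below.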
2416 */ 2417static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2418 unsigned long address, pmd_t *pmd, 2419 pgoff_t pgoff, unsigned int flags, pte_t orig_pte) 2420{ 2421 pte_t *page_table; 2422 spinlock_t *ptl; 2423 struct page *page; 2424 pte_t entry; 2425 int anon = 0; 2426 struct page *dirty_page = NULL; 2427 struct vm_fault vmf; 2428 int ret; 2429 int page_mkwrite = 0; 2430 2431 vmf.virtual_address = (void __user *)(address & PAGE_MASK); 2432 vmf.pgoff = pgoff; 2433 vmf.flags = flags; 2434 vmf.page = NULL; 2435 2436 ret = vma->vm_ops->fault(vma, &vmf); 2437 if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) 2438 return ret; 2439 2440 /* 2441 * For consistency in subsequent calls, make the faulted page always 2442 * locked. 2443 */ 2444 if (unlikely(!(ret & VM_FAULT_LOCKED))) 2445 lock_page(vmf.page); 2446 else 2447 VM_BUG_ON(!PageLocked(vmf.page)); 2448 2449 /* 2450 * Should we do an early C-O-W break? 2451 */ 2452 page = vmf.page; 2453 if (flags & FAULT_FLAG_WRITE) { 2454 if (!(vma->vm_flags & VM_SHARED)) { 2455 anon = 1; 2456 if (unlikely(anon_vma_prepare(vma))) { 2457 ret = VM_FAULT_OOM; 2458 goto out; 2459 } 2460 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, 2461 vma, address); 2462 if (!page) { 2463 ret = VM_FAULT_OOM; 2464 goto out; 2465 } 2466 copy_user_highpage(page, vmf.page, address, vma); 2467 __SetPageUptodate(page); 2468 } else { 2469 /* 2470 * If the page will be shareable, see if the backing 2471 * address space wants to know that the page is about 2472 * to become writable 2473 */ 2474 if (vma->vm_ops->page_mkwrite) { 2475 unlock_page(page); 2476 if (vma->vm_ops->page_mkwrite(vma, page) < 0) { 2477 ret = VM_FAULT_SIGBUS; 2478 anon = 1; /* no anon but release vmf.page */ 2479 goto out_unlocked; 2480 } 2481 lock_page(page); 2482 /* 2483 * XXX: this is not quite right (racy vs 2484 * invalidate) to unlock and relock the page 2485 * like this, however a better fix requires 2486 * reworking page_mkwrite locking API, which 2487 * is better done later. 2488 */ 2489 if (!page->mapping) { 2490 ret = 0; 2491 anon = 1; /* no anon but release vmf.page */ 2492 goto out; 2493 } 2494 page_mkwrite = 1; 2495 } 2496 } 2497 2498 } 2499 2500 if (mem_cgroup_charge(page, mm, GFP_KERNEL)) { 2501 ret = VM_FAULT_OOM; 2502 goto out; 2503 } 2504 2505 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2506 2507 /* 2508 * This silly early PAGE_DIRTY setting removes a race 2509 * due to the bad i386 page protection. But it's valid 2510 * for other architectures too. 2511 * 2512 * Note that if write_access is true, we either now have 2513 * an exclusive copy of the page, or this is a shared mapping, 2514 * so we can make it writable and dirty to avoid having to 2515 * handle that later. 2516 */ 2517 /* Only go through if we didn't race with anybody else... 
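 *
 * (Concretely: the pte must still equal orig_pte; if another thread
 * got there first while we were allocating or copying, we back out,
 * uncharge the cgroup and drop our extra page reference.)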
*/ 2518 if (likely(pte_same(*page_table, orig_pte))) { 2519 flush_icache_page(vma, page); 2520 entry = mk_pte(page, vma->vm_page_prot); 2521 if (flags & FAULT_FLAG_WRITE) 2522 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2523 set_pte_at(mm, address, page_table, entry); 2524 if (anon) { 2525 inc_mm_counter(mm, anon_rss); 2526 lru_cache_add_active(page); 2527 page_add_new_anon_rmap(page, vma, address); 2528 } else { 2529 inc_mm_counter(mm, file_rss); 2530 page_add_file_rmap(page); 2531 if (flags & FAULT_FLAG_WRITE) { 2532 dirty_page = page; 2533 get_page(dirty_page); 2534 } 2535 } 2536 2537 /* no need to invalidate: a not-present page won't be cached */ 2538 update_mmu_cache(vma, address, entry); 2539 } else { 2540 mem_cgroup_uncharge_page(page); 2541 if (anon) 2542 page_cache_release(page); 2543 else 2544 anon = 1; /* no anon but release faulted_page */ 2545 } 2546 2547 pte_unmap_unlock(page_table, ptl); 2548 2549out: 2550 unlock_page(vmf.page); 2551out_unlocked: 2552 if (anon) 2553 page_cache_release(vmf.page); 2554 else if (dirty_page) { 2555 if (vma->vm_file) 2556 file_update_time(vma->vm_file); 2557 2558 set_page_dirty_balance(dirty_page, page_mkwrite); 2559 put_page(dirty_page); 2560 } 2561 2562 return ret; 2563} 2564 2565static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2566 unsigned long address, pte_t *page_table, pmd_t *pmd, 2567 int write_access, pte_t orig_pte) 2568{ 2569 pgoff_t pgoff = (((address & PAGE_MASK) 2570 - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2571 unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0); 2572 2573 pte_unmap(page_table); 2574 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 2575} 2576 2577/* 2578 * Fault of a previously existing named mapping. Repopulate the pte 2579 * from the encoded file_pte if possible. This enables swappable 2580 * nonlinear vmas. 2581 * 2582 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2583 * but allow concurrent faults), and pte mapped but not yet locked. 2584 * We return with mmap_sem still held, but pte unmapped and unlocked. 2585 */ 2586static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2587 unsigned long address, pte_t *page_table, pmd_t *pmd, 2588 int write_access, pte_t orig_pte) 2589{ 2590 unsigned int flags = FAULT_FLAG_NONLINEAR | 2591 (write_access ? FAULT_FLAG_WRITE : 0); 2592 pgoff_t pgoff; 2593 2594 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2595 return 0; 2596 2597 if (unlikely(!(vma->vm_flags & VM_NONLINEAR) || 2598 !(vma->vm_flags & VM_CAN_NONLINEAR))) { 2599 /* 2600 * Page table corrupted: show pte and kill process. 2601 */ 2602 print_bad_pte(vma, orig_pte, address); 2603 return VM_FAULT_OOM; 2604 } 2605 2606 pgoff = pte_to_pgoff(orig_pte); 2607 return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); 2608} 2609 2610/* 2611 * These routines also need to handle stuff like marking pages dirty 2612 * and/or accessed for architectures that don't do it in hardware (most 2613 * RISC architectures). The early dirtying is also good on the i386. 2614 * 2615 * There is also a hook called "update_mmu_cache()" that architectures 2616 * with external mmu caches can use to update those (ie the Sparc or 2617 * PowerPC hashed page tables that act as extended TLBs). 2618 * 2619 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2620 * but allow concurrent faults), and pte mapped but not yet locked. 2621 * We return with mmap_sem still held, but pte unmapped and unlocked. 
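 *
 * Dispatch summary for the tests below (a sketch of this function's
 * flow, not an exhaustive truth table):
 *
 *	pte_none() and vma->vm_ops->fault	-> do_linear_fault()
 *	pte_none(), no usable vm_ops		-> do_anonymous_page()
 *	pte_file(), i.e. nonlinear		-> do_nonlinear_fault()
 *	other non-present (a swap entry)	-> do_swap_page()
 *	present, write fault, !pte_write()	-> do_wp_page()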
2622 */ 2623static inline int handle_pte_fault(struct mm_struct *mm, 2624 struct vm_area_struct *vma, unsigned long address, 2625 pte_t *pte, pmd_t *pmd, int write_access) 2626{ 2627 pte_t entry; 2628 spinlock_t *ptl; 2629 2630 entry = *pte; 2631 if (!pte_present(entry)) { 2632 if (pte_none(entry)) { 2633 if (vma->vm_ops) { 2634 if (likely(vma->vm_ops->fault)) 2635 return do_linear_fault(mm, vma, address, 2636 pte, pmd, write_access, entry); 2637 } 2638 return do_anonymous_page(mm, vma, address, 2639 pte, pmd, write_access); 2640 } 2641 if (pte_file(entry)) 2642 return do_nonlinear_fault(mm, vma, address, 2643 pte, pmd, write_access, entry); 2644 return do_swap_page(mm, vma, address, 2645 pte, pmd, write_access, entry); 2646 } 2647 2648 ptl = pte_lockptr(mm, pmd); 2649 spin_lock(ptl); 2650 if (unlikely(!pte_same(*pte, entry))) 2651 goto unlock; 2652 if (write_access) { 2653 if (!pte_write(entry)) 2654 return do_wp_page(mm, vma, address, 2655 pte, pmd, ptl, entry); 2656 entry = pte_mkdirty(entry); 2657 } 2658 entry = pte_mkyoung(entry); 2659 if (ptep_set_access_flags(vma, address, pte, entry, write_access)) { 2660 update_mmu_cache(vma, address, entry); 2661 } else { 2662 /* 2663 * This is needed only for protection faults but the arch code 2664 * is not yet telling us if this is a protection fault or not. 2665 * This still avoids useless tlb flushes for .text page faults 2666 * with threads. 2667 */ 2668 if (write_access) 2669 flush_tlb_page(vma, address); 2670 } 2671unlock: 2672 pte_unmap_unlock(pte, ptl); 2673 return 0; 2674} 2675 2676/* 2677 * By the time we get here, we already hold the mm semaphore 2678 */ 2679int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2680 unsigned long address, int write_access) 2681{ 2682 pgd_t *pgd; 2683 pud_t *pud; 2684 pmd_t *pmd; 2685 pte_t *pte; 2686 2687 __set_current_state(TASK_RUNNING); 2688 2689 count_vm_event(PGFAULT); 2690 2691 if (unlikely(is_vm_hugetlb_page(vma))) 2692 return hugetlb_fault(mm, vma, address, write_access); 2693 2694 pgd = pgd_offset(mm, address); 2695 pud = pud_alloc(mm, pgd, address); 2696 if (!pud) 2697 return VM_FAULT_OOM; 2698 pmd = pmd_alloc(mm, pud, address); 2699 if (!pmd) 2700 return VM_FAULT_OOM; 2701 pte = pte_alloc_map(mm, pmd, address); 2702 if (!pte) 2703 return VM_FAULT_OOM; 2704 2705 return handle_pte_fault(mm, vma, address, pte, pmd, write_access); 2706} 2707 2708#ifndef __PAGETABLE_PUD_FOLDED 2709/* 2710 * Allocate page upper directory. 2711 * We've already handled the fast-path in-line. 2712 */ 2713int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 2714{ 2715 pud_t *new = pud_alloc_one(mm, address); 2716 if (!new) 2717 return -ENOMEM; 2718 2719 smp_wmb(); /* See comment in __pte_alloc */ 2720 2721 spin_lock(&mm->page_table_lock); 2722 if (pgd_present(*pgd)) /* Another has populated it */ 2723 pud_free(mm, new); 2724 else 2725 pgd_populate(mm, pgd, new); 2726 spin_unlock(&mm->page_table_lock); 2727 return 0; 2728} 2729#endif /* __PAGETABLE_PUD_FOLDED */ 2730 2731#ifndef __PAGETABLE_PMD_FOLDED 2732/* 2733 * Allocate page middle directory. 2734 * We've already handled the fast-path in-line. 
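 *
 * (The fast path is the pmd_alloc() wrapper in <linux/mm.h>, which
 * only calls down here when the pud entry is still empty. The
 * smp_wmb() below orders initialisation of the new table against
 * its insertion, pairing with the page table walkers as described
 * above __pte_alloc().)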
 2735 */ 2736int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 2737{ 2738	pmd_t *new = pmd_alloc_one(mm, address); 2739	if (!new) 2740		return -ENOMEM; 2741 2742	smp_wmb(); /* See comment in __pte_alloc */ 2743 2744	spin_lock(&mm->page_table_lock); 2745#ifndef __ARCH_HAS_4LEVEL_HACK 2746	if (pud_present(*pud))		/* Another has populated it */ 2747		pmd_free(mm, new); 2748	else 2749		pud_populate(mm, pud, new); 2750#else 2751	if (pgd_present(*pud))		/* Another has populated it */ 2752		pmd_free(mm, new); 2753	else 2754		pgd_populate(mm, pud, new); 2755#endif /* __ARCH_HAS_4LEVEL_HACK */ 2756	spin_unlock(&mm->page_table_lock); 2757	return 0; 2758} 2759#endif /* __PAGETABLE_PMD_FOLDED */ 2760 2761int make_pages_present(unsigned long addr, unsigned long end) 2762{ 2763	int ret, len, write; 2764	struct vm_area_struct * vma; 2765 2766	vma = find_vma(current->mm, addr); 2767	if (!vma) 2768		return -ENOMEM; 2769	write = (vma->vm_flags & VM_WRITE) != 0; 2770	BUG_ON(addr >= end); 2771	BUG_ON(end > vma->vm_end); 2772	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE; 2773	ret = get_user_pages(current, current->mm, addr, 2774			len, write, 0, NULL, NULL); 2775	if (ret < 0) { 2776		/* 2777		   SUS requires mlock to return specific errors: 2778		   - an invalid address should yield ENOMEM, 2779		   - being out of memory should yield EAGAIN. 2780		 */ 2781		if (ret == -EFAULT) 2782			ret = -ENOMEM; 2783		else if (ret == -ENOMEM) 2784			ret = -EAGAIN; 2785		return ret; 2786	} 2787	return ret == len ? 0 : -ENOMEM; 2788} 2789 2790#if !defined(__HAVE_ARCH_GATE_AREA) 2791 2792#if defined(AT_SYSINFO_EHDR) 2793static struct vm_area_struct gate_vma; 2794 2795static int __init gate_vma_init(void) 2796{ 2797	gate_vma.vm_mm = NULL; 2798	gate_vma.vm_start = FIXADDR_USER_START; 2799	gate_vma.vm_end = FIXADDR_USER_END; 2800	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; 2801	gate_vma.vm_page_prot = __P101; 2802	/* 2803	 * Make sure the vDSO gets into every core dump. 2804	 * Dumping its contents makes post-mortem fully interpretable later 2805	 * without matching up the same kernel and hardware config to see 2806	 * what PC values meant. 2807	 */ 2808	gate_vma.vm_flags |= VM_ALWAYSDUMP; 2809	return 0; 2810} 2811__initcall(gate_vma_init); 2812#endif 2813 2814struct vm_area_struct *get_gate_vma(struct task_struct *tsk) 2815{ 2816#ifdef AT_SYSINFO_EHDR 2817	return &gate_vma; 2818#else 2819	return NULL; 2820#endif 2821} 2822 2823int in_gate_area_no_task(unsigned long addr) 2824{ 2825#ifdef AT_SYSINFO_EHDR 2826	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) 2827		return 1; 2828#endif 2829	return 0; 2830} 2831 2832#endif	/* __HAVE_ARCH_GATE_AREA */ 2833 2834#ifdef CONFIG_HAVE_IOREMAP_PROT 2835static resource_size_t follow_phys(struct vm_area_struct *vma, 2836			unsigned long address, unsigned int flags, 2837			unsigned long *prot) 2838{ 2839	pgd_t *pgd; 2840	pud_t *pud; 2841	pmd_t *pmd; 2842	pte_t *ptep, pte; 2843	spinlock_t *ptl; 2844	resource_size_t phys_addr = 0; 2845	struct mm_struct *mm = vma->vm_mm; 2846 2847	VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP))); 2848 2849	pgd = pgd_offset(mm, address); 2850	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 2851		goto no_page_table; 2852 2853	pud = pud_offset(pgd, address); 2854	if (pud_none(*pud) || unlikely(pud_bad(*pud))) 2855		goto no_page_table; 2856 2857	pmd = pmd_offset(pud, address); 2858	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 2859		goto no_page_table; 2860 2861	/* We cannot handle huge page PFN maps. Luckily they don't exist. 
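	 * (A huge pmd is a leaf entry, not a pointer to a pte page, so
	 * it must fail the lookup below rather than be walked through.)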
	 */ 2862	if (pmd_huge(*pmd)) 2863		goto no_page_table; 2864 2865	ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 2866	if (!ptep) 2867		goto out; 2868 2869	pte = *ptep; 2870	if (!pte_present(pte)) 2871		goto unlock; 2872	if ((flags & FOLL_WRITE) && !pte_write(pte)) 2873		goto unlock; 2874	phys_addr = pte_pfn(pte); 2875	phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */ 2876 2877	*prot = pgprot_val(pte_pgprot(pte)); 2878 2879unlock: 2880	pte_unmap_unlock(ptep, ptl); 2881out: 2882	return phys_addr; 2883no_page_table: 2884	return 0; 2885} 2886 2887int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 2888			void *buf, int len, int write) 2889{ 2890	resource_size_t phys_addr; 2891	unsigned long prot = 0; 2892	void *maddr; 2893	int offset = addr & (PAGE_SIZE-1); 2894 2895	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) 2896		return -EINVAL; 2897 2898	phys_addr = follow_phys(vma, addr, write, &prot); 2899 2900	if (!phys_addr) 2901		return -EINVAL; 2902 2903	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot); 2904	if (write) 2905		memcpy_toio(maddr + offset, buf, len); 2906	else 2907		memcpy_fromio(buf, maddr + offset, len); 2908	iounmap(maddr); 2909 2910	return len; 2911} 2912#endif 2913 2914/* 2915 * Access another process' address space. 2916 * Source/target buffer must be in kernel space. 2917 * Do not walk the page tables directly; use get_user_pages(). 2918 */ 2919int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 2920{ 2921	struct mm_struct *mm; 2922	struct vm_area_struct *vma; 2923	void *old_buf = buf; 2924 2925	mm = get_task_mm(tsk); 2926	if (!mm) 2927		return 0; 2928 2929	down_read(&mm->mmap_sem); 2930	/* ignore errors, just check how much was successfully transferred */ 2931	while (len) { 2932		int bytes, ret, offset; 2933		void *maddr; 2934		struct page *page = NULL; 2935 2936		ret = get_user_pages(tsk, mm, addr, 1, 2937				write, 1, &page, &vma); 2938		if (ret <= 0) { 2939			/* 2940			 * Check if this is a VM_IO | VM_PFNMAP VMA, which 2941			 * we can access using slightly different code. 2942			 */ 2943#ifdef CONFIG_HAVE_IOREMAP_PROT 2944			vma = find_vma(mm, addr); 2945			if (!vma) 2946				break; 2947			if (vma->vm_ops && vma->vm_ops->access) 2948				ret = vma->vm_ops->access(vma, addr, buf, 2949							  len, write); 2950			if (ret <= 0) 2951#endif 2952				break; 2953			bytes = ret; 2954		} else { 2955			bytes = len; 2956			offset = addr & (PAGE_SIZE-1); 2957			if (bytes > PAGE_SIZE-offset) 2958				bytes = PAGE_SIZE-offset; 2959 2960			maddr = kmap(page); 2961			if (write) { 2962				copy_to_user_page(vma, page, addr, 2963						  maddr + offset, buf, bytes); 2964				set_page_dirty_lock(page); 2965			} else { 2966				copy_from_user_page(vma, page, addr, 2967						    buf, maddr + offset, bytes); 2968			} 2969			kunmap(page); 2970			page_cache_release(page); 2971		} 2972		len -= bytes; 2973		buf += bytes; 2974		addr += bytes; 2975	} 2976	up_read(&mm->mmap_sem); 2977	mmput(mm); 2978 2979	return buf - old_buf; 2980} 2981 2982/* 2983 * Print the name of a VMA. 
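 *
 * Used by architecture fault handlers when reporting user-space
 * crashes, along the lines of this hypothetical sketch (the printk
 * wording is illustrative only):
 *
 *	printk(KERN_INFO "%s[%d]: segfault at %lx ip %lx",
 *		current->comm, task_pid_nr(current), address, ip);
 *	print_vma_addr(" in ", ip);
 *	printk("\n");
 *
 * which appends something like " in app[8048000+1000]" to the line.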
 2984 */ 2985void print_vma_addr(char *prefix, unsigned long ip) 2986{ 2987	struct mm_struct *mm = current->mm; 2988	struct vm_area_struct *vma; 2989 2990	/* 2991	 * Do not print if we are in atomic 2992	 * contexts (in exception stacks, etc.): 2993	 */ 2994	if (preempt_count()) 2995		return; 2996 2997	down_read(&mm->mmap_sem); 2998	vma = find_vma(mm, ip); 2999	if (vma && vma->vm_file) { 3000		struct file *f = vma->vm_file; 3001		char *buf = (char *)__get_free_page(GFP_KERNEL); 3002		if (buf) { 3003			char *p, *s; 3004 3005			p = d_path(&f->f_path, buf, PAGE_SIZE); 3006			if (IS_ERR(p)) 3007				p = "?"; 3008			s = strrchr(p, '/'); 3009			if (s) 3010				p = s+1; 3011			printk("%s%s[%lx+%lx]", prefix, p, 3012					vma->vm_start, 3013					vma->vm_end - vma->vm_start); 3014			free_page((unsigned long)buf); 3015		} 3016	} 3017	up_read(&current->mm->mmap_sem); 3018} 3019