memory.c revision 1ff8038988adecfde71d82c0597727fc239d4e8c
1/* 2 * linux/mm/memory.c 3 * 4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 5 */ 6 7/* 8 * demand-loading started 01.12.91 - seems it is high on the list of 9 * things wanted, and it should be easy to implement. - Linus 10 */ 11 12/* 13 * Ok, demand-loading was easy, shared pages a little bit tricker. Shared 14 * pages started 02.12.91, seems to work. - Linus. 15 * 16 * Tested sharing by executing about 30 /bin/sh: under the old kernel it 17 * would have taken more than the 6M I have free, but it worked well as 18 * far as I could see. 19 * 20 * Also corrected some "invalidate()"s - I wasn't doing enough of them. 21 */ 22 23/* 24 * Real VM (paging to/from disk) started 18.12.91. Much more work and 25 * thought has to go into this. Oh, well.. 26 * 19.12.91 - works, somewhat. Sometimes I get faults, don't know why. 27 * Found it. Everything seems to work now. 28 * 20.12.91 - Ok, making the swap-device changeable like the root. 29 */ 30 31/* 32 * 05.04.94 - Multi-page memory management added for v1.1. 33 * Idea by Alex Bligh (alex@cconcepts.co.uk) 34 * 35 * 16.07.99 - Support of BIGMEM added by Gerhard Wichert, Siemens AG 36 * (Gerhard.Wichert@pdb.siemens.de) 37 * 38 * Aug/Sep 2004 Changed to four level page tables (Andi Kleen) 39 */ 40 41#include <linux/kernel_stat.h> 42#include <linux/mm.h> 43#include <linux/hugetlb.h> 44#include <linux/mman.h> 45#include <linux/swap.h> 46#include <linux/highmem.h> 47#include <linux/pagemap.h> 48#include <linux/rmap.h> 49#include <linux/module.h> 50#include <linux/init.h> 51 52#include <asm/pgalloc.h> 53#include <asm/uaccess.h> 54#include <asm/tlb.h> 55#include <asm/tlbflush.h> 56#include <asm/pgtable.h> 57 58#include <linux/swapops.h> 59#include <linux/elf.h> 60 61#ifndef CONFIG_NEED_MULTIPLE_NODES 62/* use the per-pgdat data instead for discontigmem - mbligh */ 63unsigned long max_mapnr; 64struct page *mem_map; 65 66EXPORT_SYMBOL(max_mapnr); 67EXPORT_SYMBOL(mem_map); 68#endif 69 70unsigned long num_physpages; 71/* 72 * A number of key systems in x86 including ioremap() rely on the assumption 73 * that high_memory defines the upper bound on direct map memory, then end 74 * of ZONE_NORMAL. Under CONFIG_DISCONTIG this means that max_low_pfn and 75 * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL 76 * and ZONE_HIGHMEM. 77 */ 78void * high_memory; 79unsigned long vmalloc_earlyreserve; 80 81EXPORT_SYMBOL(num_physpages); 82EXPORT_SYMBOL(high_memory); 83EXPORT_SYMBOL(vmalloc_earlyreserve); 84 85/* 86 * If a p?d_bad entry is found while walking page tables, report 87 * the error, before resetting entry to p?d_none. Usually (but 88 * very seldom) called out from the p?d_none_or_clear_bad macros. 89 */ 90 91void pgd_clear_bad(pgd_t *pgd) 92{ 93 pgd_ERROR(*pgd); 94 pgd_clear(pgd); 95} 96 97void pud_clear_bad(pud_t *pud) 98{ 99 pud_ERROR(*pud); 100 pud_clear(pud); 101} 102 103void pmd_clear_bad(pmd_t *pmd) 104{ 105 pmd_ERROR(*pmd); 106 pmd_clear(pmd); 107} 108 109/* 110 * Note: this doesn't free the actual pages themselves. That 111 * has been handled earlier when unmapping all the memory regions. 112 */ 113static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd) 114{ 115 struct page *page = pmd_page(*pmd); 116 pmd_clear(pmd); 117 pte_lock_deinit(page); 118 pte_free_tlb(tlb, page); 119 dec_page_state(nr_page_table_pages); 120 tlb->mm->nr_ptes--; 121} 122 123static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud, 124 unsigned long addr, unsigned long end, 125 unsigned long floor, unsigned long ceiling) 126{ 127 pmd_t *pmd; 128 unsigned long next; 129 unsigned long start; 130 131 start = addr; 132 pmd = pmd_offset(pud, addr); 133 do { 134 next = pmd_addr_end(addr, end); 135 if (pmd_none_or_clear_bad(pmd)) 136 continue; 137 free_pte_range(tlb, pmd); 138 } while (pmd++, addr = next, addr != end); 139 140 start &= PUD_MASK; 141 if (start < floor) 142 return; 143 if (ceiling) { 144 ceiling &= PUD_MASK; 145 if (!ceiling) 146 return; 147 } 148 if (end - 1 > ceiling - 1) 149 return; 150 151 pmd = pmd_offset(pud, start); 152 pud_clear(pud); 153 pmd_free_tlb(tlb, pmd); 154} 155 156static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, 157 unsigned long addr, unsigned long end, 158 unsigned long floor, unsigned long ceiling) 159{ 160 pud_t *pud; 161 unsigned long next; 162 unsigned long start; 163 164 start = addr; 165 pud = pud_offset(pgd, addr); 166 do { 167 next = pud_addr_end(addr, end); 168 if (pud_none_or_clear_bad(pud)) 169 continue; 170 free_pmd_range(tlb, pud, addr, next, floor, ceiling); 171 } while (pud++, addr = next, addr != end); 172 173 start &= PGDIR_MASK; 174 if (start < floor) 175 return; 176 if (ceiling) { 177 ceiling &= PGDIR_MASK; 178 if (!ceiling) 179 return; 180 } 181 if (end - 1 > ceiling - 1) 182 return; 183 184 pud = pud_offset(pgd, start); 185 pgd_clear(pgd); 186 pud_free_tlb(tlb, pud); 187} 188 189/* 190 * This function frees user-level page tables of a process. 191 * 192 * Must be called with pagetable lock held. 193 */ 194void free_pgd_range(struct mmu_gather **tlb, 195 unsigned long addr, unsigned long end, 196 unsigned long floor, unsigned long ceiling) 197{ 198 pgd_t *pgd; 199 unsigned long next; 200 unsigned long start; 201 202 /* 203 * The next few lines have given us lots of grief... 204 * 205 * Why are we testing PMD* at this top level? Because often 206 * there will be no work to do at all, and we'd prefer not to 207 * go all the way down to the bottom just to discover that. 208 * 209 * Why all these "- 1"s? Because 0 represents both the bottom 210 * of the address space and the top of it (using -1 for the 211 * top wouldn't help much: the masks would do the wrong thing). 212 * The rule is that addr 0 and floor 0 refer to the bottom of 213 * the address space, but end 0 and ceiling 0 refer to the top 214 * Comparisons need to use "end - 1" and "ceiling - 1" (though 215 * that end 0 case should be mythical). 216 * 217 * Wherever addr is brought up or ceiling brought down, we must 218 * be careful to reject "the opposite 0" before it confuses the 219 * subsequent tests. But what about where end is brought down 220 * by PMD_SIZE below? no, end can't go down to 0 there. 221 * 222 * Whereas we round start (addr) and ceiling down, by different 223 * masks at different levels, in order to test whether a table 224 * now has no other vmas using it, so can be freed, we don't 225 * bother to round floor or end up - the tests don't need that. 226 */ 227 228 addr &= PMD_MASK; 229 if (addr < floor) { 230 addr += PMD_SIZE; 231 if (!addr) 232 return; 233 } 234 if (ceiling) { 235 ceiling &= PMD_MASK; 236 if (!ceiling) 237 return; 238 } 239 if (end - 1 > ceiling - 1) 240 end -= PMD_SIZE; 241 if (addr > end - 1) 242 return; 243 244 start = addr; 245 pgd = pgd_offset((*tlb)->mm, addr); 246 do { 247 next = pgd_addr_end(addr, end); 248 if (pgd_none_or_clear_bad(pgd)) 249 continue; 250 free_pud_range(*tlb, pgd, addr, next, floor, ceiling); 251 } while (pgd++, addr = next, addr != end); 252 253 if (!(*tlb)->fullmm) 254 flush_tlb_pgtables((*tlb)->mm, start, end); 255} 256 257void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma, 258 unsigned long floor, unsigned long ceiling) 259{ 260 while (vma) { 261 struct vm_area_struct *next = vma->vm_next; 262 unsigned long addr = vma->vm_start; 263 264 /* 265 * Hide vma from rmap and vmtruncate before freeing pgtables 266 */ 267 anon_vma_unlink(vma); 268 unlink_file_vma(vma); 269 270 if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) { 271 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, 272 floor, next? next->vm_start: ceiling); 273 } else { 274 /* 275 * Optimization: gather nearby vmas into one call down 276 */ 277 while (next && next->vm_start <= vma->vm_end + PMD_SIZE 278 && !is_hugepage_only_range(vma->vm_mm, next->vm_start, 279 HPAGE_SIZE)) { 280 vma = next; 281 next = vma->vm_next; 282 anon_vma_unlink(vma); 283 unlink_file_vma(vma); 284 } 285 free_pgd_range(tlb, addr, vma->vm_end, 286 floor, next? next->vm_start: ceiling); 287 } 288 vma = next; 289 } 290} 291 292int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address) 293{ 294 struct page *new = pte_alloc_one(mm, address); 295 if (!new) 296 return -ENOMEM; 297 298 pte_lock_init(new); 299 spin_lock(&mm->page_table_lock); 300 if (pmd_present(*pmd)) { /* Another has populated it */ 301 pte_lock_deinit(new); 302 pte_free(new); 303 } else { 304 mm->nr_ptes++; 305 inc_page_state(nr_page_table_pages); 306 pmd_populate(mm, pmd, new); 307 } 308 spin_unlock(&mm->page_table_lock); 309 return 0; 310} 311 312int __pte_alloc_kernel(pmd_t *pmd, unsigned long address) 313{ 314 pte_t *new = pte_alloc_one_kernel(&init_mm, address); 315 if (!new) 316 return -ENOMEM; 317 318 spin_lock(&init_mm.page_table_lock); 319 if (pmd_present(*pmd)) /* Another has populated it */ 320 pte_free_kernel(new); 321 else 322 pmd_populate_kernel(&init_mm, pmd, new); 323 spin_unlock(&init_mm.page_table_lock); 324 return 0; 325} 326 327static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss) 328{ 329 if (file_rss) 330 add_mm_counter(mm, file_rss, file_rss); 331 if (anon_rss) 332 add_mm_counter(mm, anon_rss, anon_rss); 333} 334 335/* 336 * This function is called to print an error when a bad pte 337 * is found. For example, we might have a PFN-mapped pte in 338 * a region that doesn't allow it. 339 * 340 * The calling function must still handle the error. 341 */ 342void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr) 343{ 344 printk(KERN_ERR "Bad pte = %08llx, process = %s, " 345 "vm_flags = %lx, vaddr = %lx\n", 346 (long long)pte_val(pte), 347 (vma->vm_mm == current->mm ? current->comm : "???"), 348 vma->vm_flags, vaddr); 349 dump_stack(); 350} 351 352static inline int is_cow_mapping(unsigned int flags) 353{ 354 return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 355} 356 357/* 358 * This function gets the "struct page" associated with a pte. 359 * 360 * NOTE! Some mappings do not have "struct pages". A raw PFN mapping 361 * will have each page table entry just pointing to a raw page frame 362 * number, and as far as the VM layer is concerned, those do not have 363 * pages associated with them - even if the PFN might point to memory 364 * that otherwise is perfectly fine and has a "struct page". 365 * 366 * The way we recognize those mappings is through the rules set up 367 * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set, 368 * and the vm_pgoff will point to the first PFN mapped: thus every 369 * page that is a raw mapping will always honor the rule 370 * 371 * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) 372 * 373 * and if that isn't true, the page has been COW'ed (in which case it 374 * _does_ have a "struct page" associated with it even if it is in a 375 * VM_PFNMAP range). 376 */ 377struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) 378{ 379 unsigned long pfn = pte_pfn(pte); 380 381 if (vma->vm_flags & VM_PFNMAP) { 382 unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT; 383 if (pfn == vma->vm_pgoff + off) 384 return NULL; 385 if (!is_cow_mapping(vma->vm_flags)) 386 return NULL; 387 } 388 389 /* 390 * Add some anal sanity checks for now. Eventually, 391 * we should just do "return pfn_to_page(pfn)", but 392 * in the meantime we check that we get a valid pfn, 393 * and that the resulting page looks ok. 394 * 395 * Remove this test eventually! 396 */ 397 if (unlikely(!pfn_valid(pfn))) { 398 print_bad_pte(vma, pte, addr); 399 return NULL; 400 } 401 402 /* 403 * NOTE! We still have PageReserved() pages in the page 404 * tables. 405 * 406 * The PAGE_ZERO() pages and various VDSO mappings can 407 * cause them to exist. 408 */ 409 return pfn_to_page(pfn); 410} 411 412/* 413 * copy one vm_area from one task to the other. Assumes the page tables 414 * already present in the new task to be cleared in the whole range 415 * covered by this vma. 416 */ 417 418static inline void 419copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, 420 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, 421 unsigned long addr, int *rss) 422{ 423 unsigned long vm_flags = vma->vm_flags; 424 pte_t pte = *src_pte; 425 struct page *page; 426 427 /* pte contains position in swap or file, so copy. */ 428 if (unlikely(!pte_present(pte))) { 429 if (!pte_file(pte)) { 430 swap_duplicate(pte_to_swp_entry(pte)); 431 /* make sure dst_mm is on swapoff's mmlist. */ 432 if (unlikely(list_empty(&dst_mm->mmlist))) { 433 spin_lock(&mmlist_lock); 434 if (list_empty(&dst_mm->mmlist)) 435 list_add(&dst_mm->mmlist, 436 &src_mm->mmlist); 437 spin_unlock(&mmlist_lock); 438 } 439 } 440 goto out_set_pte; 441 } 442 443 /* 444 * If it's a COW mapping, write protect it both 445 * in the parent and the child 446 */ 447 if (is_cow_mapping(vm_flags)) { 448 ptep_set_wrprotect(src_mm, addr, src_pte); 449 pte = *src_pte; 450 } 451 452 /* 453 * If it's a shared mapping, mark it clean in 454 * the child 455 */ 456 if (vm_flags & VM_SHARED) 457 pte = pte_mkclean(pte); 458 pte = pte_mkold(pte); 459 460 page = vm_normal_page(vma, addr, pte); 461 if (page) { 462 get_page(page); 463 page_dup_rmap(page); 464 rss[!!PageAnon(page)]++; 465 } 466 467out_set_pte: 468 set_pte_at(dst_mm, addr, dst_pte, pte); 469} 470 471static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 472 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, 473 unsigned long addr, unsigned long end) 474{ 475 pte_t *src_pte, *dst_pte; 476 spinlock_t *src_ptl, *dst_ptl; 477 int progress = 0; 478 int rss[2]; 479 480again: 481 rss[1] = rss[0] = 0; 482 dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); 483 if (!dst_pte) 484 return -ENOMEM; 485 src_pte = pte_offset_map_nested(src_pmd, addr); 486 src_ptl = pte_lockptr(src_mm, src_pmd); 487 spin_lock(src_ptl); 488 489 do { 490 /* 491 * We are holding two locks at this point - either of them 492 * could generate latencies in another task on another CPU. 493 */ 494 if (progress >= 32) { 495 progress = 0; 496 if (need_resched() || 497 need_lockbreak(src_ptl) || 498 need_lockbreak(dst_ptl)) 499 break; 500 } 501 if (pte_none(*src_pte)) { 502 progress++; 503 continue; 504 } 505 copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss); 506 progress += 8; 507 } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); 508 509 spin_unlock(src_ptl); 510 pte_unmap_nested(src_pte - 1); 511 add_mm_rss(dst_mm, rss[0], rss[1]); 512 pte_unmap_unlock(dst_pte - 1, dst_ptl); 513 cond_resched(); 514 if (addr != end) 515 goto again; 516 return 0; 517} 518 519static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 520 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, 521 unsigned long addr, unsigned long end) 522{ 523 pmd_t *src_pmd, *dst_pmd; 524 unsigned long next; 525 526 dst_pmd = pmd_alloc(dst_mm, dst_pud, addr); 527 if (!dst_pmd) 528 return -ENOMEM; 529 src_pmd = pmd_offset(src_pud, addr); 530 do { 531 next = pmd_addr_end(addr, end); 532 if (pmd_none_or_clear_bad(src_pmd)) 533 continue; 534 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, 535 vma, addr, next)) 536 return -ENOMEM; 537 } while (dst_pmd++, src_pmd++, addr = next, addr != end); 538 return 0; 539} 540 541static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 542 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, 543 unsigned long addr, unsigned long end) 544{ 545 pud_t *src_pud, *dst_pud; 546 unsigned long next; 547 548 dst_pud = pud_alloc(dst_mm, dst_pgd, addr); 549 if (!dst_pud) 550 return -ENOMEM; 551 src_pud = pud_offset(src_pgd, addr); 552 do { 553 next = pud_addr_end(addr, end); 554 if (pud_none_or_clear_bad(src_pud)) 555 continue; 556 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, 557 vma, addr, next)) 558 return -ENOMEM; 559 } while (dst_pud++, src_pud++, addr = next, addr != end); 560 return 0; 561} 562 563int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, 564 struct vm_area_struct *vma) 565{ 566 pgd_t *src_pgd, *dst_pgd; 567 unsigned long next; 568 unsigned long addr = vma->vm_start; 569 unsigned long end = vma->vm_end; 570 571 /* 572 * Don't copy ptes where a page fault will fill them correctly. 573 * Fork becomes much lighter when there are big shared or private 574 * readonly mappings. The tradeoff is that copy_page_range is more 575 * efficient than faulting. 576 */ 577 if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) { 578 if (!vma->anon_vma) 579 return 0; 580 } 581 582 if (is_vm_hugetlb_page(vma)) 583 return copy_hugetlb_page_range(dst_mm, src_mm, vma); 584 585 dst_pgd = pgd_offset(dst_mm, addr); 586 src_pgd = pgd_offset(src_mm, addr); 587 do { 588 next = pgd_addr_end(addr, end); 589 if (pgd_none_or_clear_bad(src_pgd)) 590 continue; 591 if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd, 592 vma, addr, next)) 593 return -ENOMEM; 594 } while (dst_pgd++, src_pgd++, addr = next, addr != end); 595 return 0; 596} 597 598static unsigned long zap_pte_range(struct mmu_gather *tlb, 599 struct vm_area_struct *vma, pmd_t *pmd, 600 unsigned long addr, unsigned long end, 601 long *zap_work, struct zap_details *details) 602{ 603 struct mm_struct *mm = tlb->mm; 604 pte_t *pte; 605 spinlock_t *ptl; 606 int file_rss = 0; 607 int anon_rss = 0; 608 609 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 610 do { 611 pte_t ptent = *pte; 612 if (pte_none(ptent)) { 613 (*zap_work)--; 614 continue; 615 } 616 if (pte_present(ptent)) { 617 struct page *page; 618 619 (*zap_work) -= PAGE_SIZE; 620 621 page = vm_normal_page(vma, addr, ptent); 622 if (unlikely(details) && page) { 623 /* 624 * unmap_shared_mapping_pages() wants to 625 * invalidate cache without truncating: 626 * unmap shared but keep private pages. 627 */ 628 if (details->check_mapping && 629 details->check_mapping != page->mapping) 630 continue; 631 /* 632 * Each page->index must be checked when 633 * invalidating or truncating nonlinear. 634 */ 635 if (details->nonlinear_vma && 636 (page->index < details->first_index || 637 page->index > details->last_index)) 638 continue; 639 } 640 ptent = ptep_get_and_clear_full(mm, addr, pte, 641 tlb->fullmm); 642 tlb_remove_tlb_entry(tlb, pte, addr); 643 if (unlikely(!page)) 644 continue; 645 if (unlikely(details) && details->nonlinear_vma 646 && linear_page_index(details->nonlinear_vma, 647 addr) != page->index) 648 set_pte_at(mm, addr, pte, 649 pgoff_to_pte(page->index)); 650 if (PageAnon(page)) 651 anon_rss--; 652 else { 653 if (pte_dirty(ptent)) 654 set_page_dirty(page); 655 if (pte_young(ptent)) 656 mark_page_accessed(page); 657 file_rss--; 658 } 659 page_remove_rmap(page); 660 tlb_remove_page(tlb, page); 661 continue; 662 } 663 /* 664 * If details->check_mapping, we leave swap entries; 665 * if details->nonlinear_vma, we leave file entries. 666 */ 667 if (unlikely(details)) 668 continue; 669 if (!pte_file(ptent)) 670 free_swap_and_cache(pte_to_swp_entry(ptent)); 671 pte_clear_full(mm, addr, pte, tlb->fullmm); 672 } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0)); 673 674 add_mm_rss(mm, file_rss, anon_rss); 675 pte_unmap_unlock(pte - 1, ptl); 676 677 return addr; 678} 679 680static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, 681 struct vm_area_struct *vma, pud_t *pud, 682 unsigned long addr, unsigned long end, 683 long *zap_work, struct zap_details *details) 684{ 685 pmd_t *pmd; 686 unsigned long next; 687 688 pmd = pmd_offset(pud, addr); 689 do { 690 next = pmd_addr_end(addr, end); 691 if (pmd_none_or_clear_bad(pmd)) { 692 (*zap_work)--; 693 continue; 694 } 695 next = zap_pte_range(tlb, vma, pmd, addr, next, 696 zap_work, details); 697 } while (pmd++, addr = next, (addr != end && *zap_work > 0)); 698 699 return addr; 700} 701 702static inline unsigned long zap_pud_range(struct mmu_gather *tlb, 703 struct vm_area_struct *vma, pgd_t *pgd, 704 unsigned long addr, unsigned long end, 705 long *zap_work, struct zap_details *details) 706{ 707 pud_t *pud; 708 unsigned long next; 709 710 pud = pud_offset(pgd, addr); 711 do { 712 next = pud_addr_end(addr, end); 713 if (pud_none_or_clear_bad(pud)) { 714 (*zap_work)--; 715 continue; 716 } 717 next = zap_pmd_range(tlb, vma, pud, addr, next, 718 zap_work, details); 719 } while (pud++, addr = next, (addr != end && *zap_work > 0)); 720 721 return addr; 722} 723 724static unsigned long unmap_page_range(struct mmu_gather *tlb, 725 struct vm_area_struct *vma, 726 unsigned long addr, unsigned long end, 727 long *zap_work, struct zap_details *details) 728{ 729 pgd_t *pgd; 730 unsigned long next; 731 732 if (details && !details->check_mapping && !details->nonlinear_vma) 733 details = NULL; 734 735 BUG_ON(addr >= end); 736 tlb_start_vma(tlb, vma); 737 pgd = pgd_offset(vma->vm_mm, addr); 738 do { 739 next = pgd_addr_end(addr, end); 740 if (pgd_none_or_clear_bad(pgd)) { 741 (*zap_work)--; 742 continue; 743 } 744 next = zap_pud_range(tlb, vma, pgd, addr, next, 745 zap_work, details); 746 } while (pgd++, addr = next, (addr != end && *zap_work > 0)); 747 tlb_end_vma(tlb, vma); 748 749 return addr; 750} 751 752#ifdef CONFIG_PREEMPT 753# define ZAP_BLOCK_SIZE (8 * PAGE_SIZE) 754#else 755/* No preempt: go for improved straight-line efficiency */ 756# define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE) 757#endif 758 759/** 760 * unmap_vmas - unmap a range of memory covered by a list of vma's 761 * @tlbp: address of the caller's struct mmu_gather 762 * @vma: the starting vma 763 * @start_addr: virtual address at which to start unmapping 764 * @end_addr: virtual address at which to end unmapping 765 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here 766 * @details: details of nonlinear truncation or shared cache invalidation 767 * 768 * Returns the end address of the unmapping (restart addr if interrupted). 769 * 770 * Unmap all pages in the vma list. 771 * 772 * We aim to not hold locks for too long (for scheduling latency reasons). 773 * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to 774 * return the ending mmu_gather to the caller. 775 * 776 * Only addresses between `start' and `end' will be unmapped. 777 * 778 * The VMA list must be sorted in ascending virtual address order. 779 * 780 * unmap_vmas() assumes that the caller will flush the whole unmapped address 781 * range after unmap_vmas() returns. So the only responsibility here is to 782 * ensure that any thus-far unmapped pages are flushed before unmap_vmas() 783 * drops the lock and schedules. 784 */ 785unsigned long unmap_vmas(struct mmu_gather **tlbp, 786 struct vm_area_struct *vma, unsigned long start_addr, 787 unsigned long end_addr, unsigned long *nr_accounted, 788 struct zap_details *details) 789{ 790 long zap_work = ZAP_BLOCK_SIZE; 791 unsigned long tlb_start = 0; /* For tlb_finish_mmu */ 792 int tlb_start_valid = 0; 793 unsigned long start = start_addr; 794 spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL; 795 int fullmm = (*tlbp)->fullmm; 796 797 for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) { 798 unsigned long end; 799 800 start = max(vma->vm_start, start_addr); 801 if (start >= vma->vm_end) 802 continue; 803 end = min(vma->vm_end, end_addr); 804 if (end <= vma->vm_start) 805 continue; 806 807 if (vma->vm_flags & VM_ACCOUNT) 808 *nr_accounted += (end - start) >> PAGE_SHIFT; 809 810 while (start != end) { 811 if (!tlb_start_valid) { 812 tlb_start = start; 813 tlb_start_valid = 1; 814 } 815 816 if (unlikely(is_vm_hugetlb_page(vma))) { 817 unmap_hugepage_range(vma, start, end); 818 zap_work -= (end - start) / 819 (HPAGE_SIZE / PAGE_SIZE); 820 start = end; 821 } else 822 start = unmap_page_range(*tlbp, vma, 823 start, end, &zap_work, details); 824 825 if (zap_work > 0) { 826 BUG_ON(start != end); 827 break; 828 } 829 830 tlb_finish_mmu(*tlbp, tlb_start, start); 831 832 if (need_resched() || 833 (i_mmap_lock && need_lockbreak(i_mmap_lock))) { 834 if (i_mmap_lock) { 835 *tlbp = NULL; 836 goto out; 837 } 838 cond_resched(); 839 } 840 841 *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm); 842 tlb_start_valid = 0; 843 zap_work = ZAP_BLOCK_SIZE; 844 } 845 } 846out: 847 return start; /* which is now the end (or restart) address */ 848} 849 850/** 851 * zap_page_range - remove user pages in a given range 852 * @vma: vm_area_struct holding the applicable pages 853 * @address: starting address of pages to zap 854 * @size: number of bytes to zap 855 * @details: details of nonlinear truncation or shared cache invalidation 856 */ 857unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, 858 unsigned long size, struct zap_details *details) 859{ 860 struct mm_struct *mm = vma->vm_mm; 861 struct mmu_gather *tlb; 862 unsigned long end = address + size; 863 unsigned long nr_accounted = 0; 864 865 lru_add_drain(); 866 tlb = tlb_gather_mmu(mm, 0); 867 update_hiwater_rss(mm); 868 end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details); 869 if (tlb) 870 tlb_finish_mmu(tlb, address, end); 871 return end; 872} 873 874/* 875 * Do a quick page-table lookup for a single page. 876 */ 877struct page *follow_page(struct vm_area_struct *vma, unsigned long address, 878 unsigned int flags) 879{ 880 pgd_t *pgd; 881 pud_t *pud; 882 pmd_t *pmd; 883 pte_t *ptep, pte; 884 spinlock_t *ptl; 885 struct page *page; 886 struct mm_struct *mm = vma->vm_mm; 887 888 page = follow_huge_addr(mm, address, flags & FOLL_WRITE); 889 if (!IS_ERR(page)) { 890 BUG_ON(flags & FOLL_GET); 891 goto out; 892 } 893 894 page = NULL; 895 pgd = pgd_offset(mm, address); 896 if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) 897 goto no_page_table; 898 899 pud = pud_offset(pgd, address); 900 if (pud_none(*pud) || unlikely(pud_bad(*pud))) 901 goto no_page_table; 902 903 pmd = pmd_offset(pud, address); 904 if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) 905 goto no_page_table; 906 907 if (pmd_huge(*pmd)) { 908 BUG_ON(flags & FOLL_GET); 909 page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE); 910 goto out; 911 } 912 913 ptep = pte_offset_map_lock(mm, pmd, address, &ptl); 914 if (!ptep) 915 goto out; 916 917 pte = *ptep; 918 if (!pte_present(pte)) 919 goto unlock; 920 if ((flags & FOLL_WRITE) && !pte_write(pte)) 921 goto unlock; 922 page = vm_normal_page(vma, address, pte); 923 if (unlikely(!page)) 924 goto unlock; 925 926 if (flags & FOLL_GET) 927 get_page(page); 928 if (flags & FOLL_TOUCH) { 929 if ((flags & FOLL_WRITE) && 930 !pte_dirty(pte) && !PageDirty(page)) 931 set_page_dirty(page); 932 mark_page_accessed(page); 933 } 934unlock: 935 pte_unmap_unlock(ptep, ptl); 936out: 937 return page; 938 939no_page_table: 940 /* 941 * When core dumping an enormous anonymous area that nobody 942 * has touched so far, we don't want to allocate page tables. 943 */ 944 if (flags & FOLL_ANON) { 945 page = ZERO_PAGE(address); 946 if (flags & FOLL_GET) 947 get_page(page); 948 BUG_ON(flags & FOLL_WRITE); 949 } 950 return page; 951} 952 953int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, 954 unsigned long start, int len, int write, int force, 955 struct page **pages, struct vm_area_struct **vmas) 956{ 957 int i; 958 unsigned int vm_flags; 959 960 /* 961 * Require read or write permissions. 962 * If 'force' is set, we only require the "MAY" flags. 963 */ 964 vm_flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD); 965 vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); 966 i = 0; 967 968 do { 969 struct vm_area_struct *vma; 970 unsigned int foll_flags; 971 972 vma = find_extend_vma(mm, start); 973 if (!vma && in_gate_area(tsk, start)) { 974 unsigned long pg = start & PAGE_MASK; 975 struct vm_area_struct *gate_vma = get_gate_vma(tsk); 976 pgd_t *pgd; 977 pud_t *pud; 978 pmd_t *pmd; 979 pte_t *pte; 980 if (write) /* user gate pages are read-only */ 981 return i ? : -EFAULT; 982 if (pg > TASK_SIZE) 983 pgd = pgd_offset_k(pg); 984 else 985 pgd = pgd_offset_gate(mm, pg); 986 BUG_ON(pgd_none(*pgd)); 987 pud = pud_offset(pgd, pg); 988 BUG_ON(pud_none(*pud)); 989 pmd = pmd_offset(pud, pg); 990 if (pmd_none(*pmd)) 991 return i ? : -EFAULT; 992 pte = pte_offset_map(pmd, pg); 993 if (pte_none(*pte)) { 994 pte_unmap(pte); 995 return i ? : -EFAULT; 996 } 997 if (pages) { 998 struct page *page = vm_normal_page(gate_vma, start, *pte); 999 pages[i] = page; 1000 if (page) 1001 get_page(page); 1002 } 1003 pte_unmap(pte); 1004 if (vmas) 1005 vmas[i] = gate_vma; 1006 i++; 1007 start += PAGE_SIZE; 1008 len--; 1009 continue; 1010 } 1011 1012 if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP)) 1013 || !(vm_flags & vma->vm_flags)) 1014 return i ? : -EFAULT; 1015 1016 if (is_vm_hugetlb_page(vma)) { 1017 i = follow_hugetlb_page(mm, vma, pages, vmas, 1018 &start, &len, i); 1019 continue; 1020 } 1021 1022 foll_flags = FOLL_TOUCH; 1023 if (pages) 1024 foll_flags |= FOLL_GET; 1025 if (!write && !(vma->vm_flags & VM_LOCKED) && 1026 (!vma->vm_ops || !vma->vm_ops->nopage)) 1027 foll_flags |= FOLL_ANON; 1028 1029 do { 1030 struct page *page; 1031 1032 if (write) 1033 foll_flags |= FOLL_WRITE; 1034 1035 cond_resched(); 1036 while (!(page = follow_page(vma, start, foll_flags))) { 1037 int ret; 1038 ret = __handle_mm_fault(mm, vma, start, 1039 foll_flags & FOLL_WRITE); 1040 /* 1041 * The VM_FAULT_WRITE bit tells us that do_wp_page has 1042 * broken COW when necessary, even if maybe_mkwrite 1043 * decided not to set pte_write. We can thus safely do 1044 * subsequent page lookups as if they were reads. 1045 */ 1046 if (ret & VM_FAULT_WRITE) 1047 foll_flags &= ~FOLL_WRITE; 1048 1049 switch (ret & ~VM_FAULT_WRITE) { 1050 case VM_FAULT_MINOR: 1051 tsk->min_flt++; 1052 break; 1053 case VM_FAULT_MAJOR: 1054 tsk->maj_flt++; 1055 break; 1056 case VM_FAULT_SIGBUS: 1057 return i ? i : -EFAULT; 1058 case VM_FAULT_OOM: 1059 return i ? i : -ENOMEM; 1060 default: 1061 BUG(); 1062 } 1063 } 1064 if (pages) { 1065 pages[i] = page; 1066 flush_dcache_page(page); 1067 } 1068 if (vmas) 1069 vmas[i] = vma; 1070 i++; 1071 start += PAGE_SIZE; 1072 len--; 1073 } while (len && start < vma->vm_end); 1074 } while (len); 1075 return i; 1076} 1077EXPORT_SYMBOL(get_user_pages); 1078 1079static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd, 1080 unsigned long addr, unsigned long end, pgprot_t prot) 1081{ 1082 pte_t *pte; 1083 spinlock_t *ptl; 1084 1085 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 1086 if (!pte) 1087 return -ENOMEM; 1088 do { 1089 struct page *page = ZERO_PAGE(addr); 1090 pte_t zero_pte = pte_wrprotect(mk_pte(page, prot)); 1091 page_cache_get(page); 1092 page_add_file_rmap(page); 1093 inc_mm_counter(mm, file_rss); 1094 BUG_ON(!pte_none(*pte)); 1095 set_pte_at(mm, addr, pte, zero_pte); 1096 } while (pte++, addr += PAGE_SIZE, addr != end); 1097 pte_unmap_unlock(pte - 1, ptl); 1098 return 0; 1099} 1100 1101static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud, 1102 unsigned long addr, unsigned long end, pgprot_t prot) 1103{ 1104 pmd_t *pmd; 1105 unsigned long next; 1106 1107 pmd = pmd_alloc(mm, pud, addr); 1108 if (!pmd) 1109 return -ENOMEM; 1110 do { 1111 next = pmd_addr_end(addr, end); 1112 if (zeromap_pte_range(mm, pmd, addr, next, prot)) 1113 return -ENOMEM; 1114 } while (pmd++, addr = next, addr != end); 1115 return 0; 1116} 1117 1118static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd, 1119 unsigned long addr, unsigned long end, pgprot_t prot) 1120{ 1121 pud_t *pud; 1122 unsigned long next; 1123 1124 pud = pud_alloc(mm, pgd, addr); 1125 if (!pud) 1126 return -ENOMEM; 1127 do { 1128 next = pud_addr_end(addr, end); 1129 if (zeromap_pmd_range(mm, pud, addr, next, prot)) 1130 return -ENOMEM; 1131 } while (pud++, addr = next, addr != end); 1132 return 0; 1133} 1134 1135int zeromap_page_range(struct vm_area_struct *vma, 1136 unsigned long addr, unsigned long size, pgprot_t prot) 1137{ 1138 pgd_t *pgd; 1139 unsigned long next; 1140 unsigned long end = addr + size; 1141 struct mm_struct *mm = vma->vm_mm; 1142 int err; 1143 1144 BUG_ON(addr >= end); 1145 pgd = pgd_offset(mm, addr); 1146 flush_cache_range(vma, addr, end); 1147 do { 1148 next = pgd_addr_end(addr, end); 1149 err = zeromap_pud_range(mm, pgd, addr, next, prot); 1150 if (err) 1151 break; 1152 } while (pgd++, addr = next, addr != end); 1153 return err; 1154} 1155 1156pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) 1157{ 1158 pgd_t * pgd = pgd_offset(mm, addr); 1159 pud_t * pud = pud_alloc(mm, pgd, addr); 1160 if (pud) { 1161 pmd_t * pmd = pmd_alloc(mm, pud, addr); 1162 if (pmd) 1163 return pte_alloc_map_lock(mm, pmd, addr, ptl); 1164 } 1165 return NULL; 1166} 1167 1168/* 1169 * This is the old fallback for page remapping. 1170 * 1171 * For historical reasons, it only allows reserved pages. Only 1172 * old drivers should use this, and they needed to mark their 1173 * pages reserved for the old functions anyway. 1174 */ 1175static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot) 1176{ 1177 int retval; 1178 pte_t *pte; 1179 spinlock_t *ptl; 1180 1181 retval = -EINVAL; 1182 if (PageAnon(page)) 1183 goto out; 1184 retval = -ENOMEM; 1185 flush_dcache_page(page); 1186 pte = get_locked_pte(mm, addr, &ptl); 1187 if (!pte) 1188 goto out; 1189 retval = -EBUSY; 1190 if (!pte_none(*pte)) 1191 goto out_unlock; 1192 1193 /* Ok, finally just insert the thing.. */ 1194 get_page(page); 1195 inc_mm_counter(mm, file_rss); 1196 page_add_file_rmap(page); 1197 set_pte_at(mm, addr, pte, mk_pte(page, prot)); 1198 1199 retval = 0; 1200out_unlock: 1201 pte_unmap_unlock(pte, ptl); 1202out: 1203 return retval; 1204} 1205 1206/* 1207 * This allows drivers to insert individual pages they've allocated 1208 * into a user vma. 1209 * 1210 * The page has to be a nice clean _individual_ kernel allocation. 1211 * If you allocate a compound page, you need to have marked it as 1212 * such (__GFP_COMP), or manually just split the page up yourself 1213 * (which is mainly an issue of doing "set_page_count(page, 1)" for 1214 * each sub-page, and then freeing them one by one when you free 1215 * them rather than freeing it as a compound page). 1216 * 1217 * NOTE! Traditionally this was done with "remap_pfn_range()" which 1218 * took an arbitrary page protection parameter. This doesn't allow 1219 * that. Your vma protection will have to be set up correctly, which 1220 * means that if you want a shared writable mapping, you'd better 1221 * ask for a shared writable mapping! 1222 * 1223 * The page does not need to be reserved. 1224 */ 1225int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) 1226{ 1227 if (addr < vma->vm_start || addr >= vma->vm_end) 1228 return -EFAULT; 1229 if (!page_count(page)) 1230 return -EINVAL; 1231 return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot); 1232} 1233EXPORT_SYMBOL(vm_insert_page); 1234 1235/* 1236 * maps a range of physical memory into the requested pages. the old 1237 * mappings are removed. any references to nonexistent pages results 1238 * in null mappings (currently treated as "copy-on-access") 1239 */ 1240static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, 1241 unsigned long addr, unsigned long end, 1242 unsigned long pfn, pgprot_t prot) 1243{ 1244 pte_t *pte; 1245 spinlock_t *ptl; 1246 1247 pte = pte_alloc_map_lock(mm, pmd, addr, &ptl); 1248 if (!pte) 1249 return -ENOMEM; 1250 do { 1251 BUG_ON(!pte_none(*pte)); 1252 set_pte_at(mm, addr, pte, pfn_pte(pfn, prot)); 1253 pfn++; 1254 } while (pte++, addr += PAGE_SIZE, addr != end); 1255 pte_unmap_unlock(pte - 1, ptl); 1256 return 0; 1257} 1258 1259static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud, 1260 unsigned long addr, unsigned long end, 1261 unsigned long pfn, pgprot_t prot) 1262{ 1263 pmd_t *pmd; 1264 unsigned long next; 1265 1266 pfn -= addr >> PAGE_SHIFT; 1267 pmd = pmd_alloc(mm, pud, addr); 1268 if (!pmd) 1269 return -ENOMEM; 1270 do { 1271 next = pmd_addr_end(addr, end); 1272 if (remap_pte_range(mm, pmd, addr, next, 1273 pfn + (addr >> PAGE_SHIFT), prot)) 1274 return -ENOMEM; 1275 } while (pmd++, addr = next, addr != end); 1276 return 0; 1277} 1278 1279static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd, 1280 unsigned long addr, unsigned long end, 1281 unsigned long pfn, pgprot_t prot) 1282{ 1283 pud_t *pud; 1284 unsigned long next; 1285 1286 pfn -= addr >> PAGE_SHIFT; 1287 pud = pud_alloc(mm, pgd, addr); 1288 if (!pud) 1289 return -ENOMEM; 1290 do { 1291 next = pud_addr_end(addr, end); 1292 if (remap_pmd_range(mm, pud, addr, next, 1293 pfn + (addr >> PAGE_SHIFT), prot)) 1294 return -ENOMEM; 1295 } while (pud++, addr = next, addr != end); 1296 return 0; 1297} 1298 1299/* Note: this is only safe if the mm semaphore is held when called. */ 1300int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, 1301 unsigned long pfn, unsigned long size, pgprot_t prot) 1302{ 1303 pgd_t *pgd; 1304 unsigned long next; 1305 unsigned long end = addr + PAGE_ALIGN(size); 1306 struct mm_struct *mm = vma->vm_mm; 1307 int err; 1308 1309 /* 1310 * Physically remapped pages are special. Tell the 1311 * rest of the world about it: 1312 * VM_IO tells people not to look at these pages 1313 * (accesses can have side effects). 1314 * VM_RESERVED is specified all over the place, because 1315 * in 2.4 it kept swapout's vma scan off this vma; but 1316 * in 2.6 the LRU scan won't even find its pages, so this 1317 * flag means no more than count its pages in reserved_vm, 1318 * and omit it from core dump, even when VM_IO turned off. 1319 * VM_PFNMAP tells the core MM that the base pages are just 1320 * raw PFN mappings, and do not have a "struct page" associated 1321 * with them. 1322 * 1323 * There's a horrible special case to handle copy-on-write 1324 * behaviour that some programs depend on. We mark the "original" 1325 * un-COW'ed pages by matching them up with "vma->vm_pgoff". 1326 */ 1327 if (is_cow_mapping(vma->vm_flags)) { 1328 if (addr != vma->vm_start || end != vma->vm_end) 1329 return -EINVAL; 1330 vma->vm_pgoff = pfn; 1331 } 1332 1333 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 1334 1335 BUG_ON(addr >= end); 1336 pfn -= addr >> PAGE_SHIFT; 1337 pgd = pgd_offset(mm, addr); 1338 flush_cache_range(vma, addr, end); 1339 do { 1340 next = pgd_addr_end(addr, end); 1341 err = remap_pud_range(mm, pgd, addr, next, 1342 pfn + (addr >> PAGE_SHIFT), prot); 1343 if (err) 1344 break; 1345 } while (pgd++, addr = next, addr != end); 1346 return err; 1347} 1348EXPORT_SYMBOL(remap_pfn_range); 1349 1350/* 1351 * handle_pte_fault chooses page fault handler according to an entry 1352 * which was read non-atomically. Before making any commitment, on 1353 * those architectures or configurations (e.g. i386 with PAE) which 1354 * might give a mix of unmatched parts, do_swap_page and do_file_page 1355 * must check under lock before unmapping the pte and proceeding 1356 * (but do_wp_page is only called after already making such a check; 1357 * and do_anonymous_page and do_no_page can safely check later on). 1358 */ 1359static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, 1360 pte_t *page_table, pte_t orig_pte) 1361{ 1362 int same = 1; 1363#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) 1364 if (sizeof(pte_t) > sizeof(unsigned long)) { 1365 spinlock_t *ptl = pte_lockptr(mm, pmd); 1366 spin_lock(ptl); 1367 same = pte_same(*page_table, orig_pte); 1368 spin_unlock(ptl); 1369 } 1370#endif 1371 pte_unmap(page_table); 1372 return same; 1373} 1374 1375/* 1376 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when 1377 * servicing faults for write access. In the normal case, do always want 1378 * pte_mkwrite. But get_user_pages can cause write faults for mappings 1379 * that do not have writing enabled, when used by access_process_vm. 1380 */ 1381static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) 1382{ 1383 if (likely(vma->vm_flags & VM_WRITE)) 1384 pte = pte_mkwrite(pte); 1385 return pte; 1386} 1387 1388static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va) 1389{ 1390 /* 1391 * If the source page was a PFN mapping, we don't have 1392 * a "struct page" for it. We do a best-effort copy by 1393 * just copying from the original user address. If that 1394 * fails, we just zero-fill it. Live with it. 1395 */ 1396 if (unlikely(!src)) { 1397 void *kaddr = kmap_atomic(dst, KM_USER0); 1398 void __user *uaddr = (void __user *)(va & PAGE_MASK); 1399 1400 /* 1401 * This really shouldn't fail, because the page is there 1402 * in the page tables. But it might just be unreadable, 1403 * in which case we just give up and fill the result with 1404 * zeroes. 1405 */ 1406 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) 1407 memset(kaddr, 0, PAGE_SIZE); 1408 kunmap_atomic(kaddr, KM_USER0); 1409 return; 1410 1411 } 1412 copy_user_highpage(dst, src, va); 1413} 1414 1415/* 1416 * This routine handles present pages, when users try to write 1417 * to a shared page. It is done by copying the page to a new address 1418 * and decrementing the shared-page counter for the old page. 1419 * 1420 * Note that this routine assumes that the protection checks have been 1421 * done by the caller (the low-level page fault routine in most cases). 1422 * Thus we can safely just mark it writable once we've done any necessary 1423 * COW. 1424 * 1425 * We also mark the page dirty at this point even though the page will 1426 * change only once the write actually happens. This avoids a few races, 1427 * and potentially makes it more efficient. 1428 * 1429 * We enter with non-exclusive mmap_sem (to exclude vma changes, 1430 * but allow concurrent faults), with pte both mapped and locked. 1431 * We return with mmap_sem still held, but pte unmapped and unlocked. 1432 */ 1433static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, 1434 unsigned long address, pte_t *page_table, pmd_t *pmd, 1435 spinlock_t *ptl, pte_t orig_pte) 1436{ 1437 struct page *old_page, *new_page; 1438 pte_t entry; 1439 int ret = VM_FAULT_MINOR; 1440 1441 old_page = vm_normal_page(vma, address, orig_pte); 1442 if (!old_page) 1443 goto gotten; 1444 1445 if (PageAnon(old_page) && !TestSetPageLocked(old_page)) { 1446 int reuse = can_share_swap_page(old_page); 1447 unlock_page(old_page); 1448 if (reuse) { 1449 flush_cache_page(vma, address, pte_pfn(orig_pte)); 1450 entry = pte_mkyoung(orig_pte); 1451 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1452 ptep_set_access_flags(vma, address, page_table, entry, 1); 1453 update_mmu_cache(vma, address, entry); 1454 lazy_mmu_prot_update(entry); 1455 ret |= VM_FAULT_WRITE; 1456 goto unlock; 1457 } 1458 } 1459 1460 /* 1461 * Ok, we need to copy. Oh, well.. 1462 */ 1463 page_cache_get(old_page); 1464gotten: 1465 pte_unmap_unlock(page_table, ptl); 1466 1467 if (unlikely(anon_vma_prepare(vma))) 1468 goto oom; 1469 if (old_page == ZERO_PAGE(address)) { 1470 new_page = alloc_zeroed_user_highpage(vma, address); 1471 if (!new_page) 1472 goto oom; 1473 } else { 1474 new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); 1475 if (!new_page) 1476 goto oom; 1477 cow_user_page(new_page, old_page, address); 1478 } 1479 1480 /* 1481 * Re-check the pte - we dropped the lock 1482 */ 1483 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 1484 if (likely(pte_same(*page_table, orig_pte))) { 1485 if (old_page) { 1486 page_remove_rmap(old_page); 1487 if (!PageAnon(old_page)) { 1488 dec_mm_counter(mm, file_rss); 1489 inc_mm_counter(mm, anon_rss); 1490 } 1491 } else 1492 inc_mm_counter(mm, anon_rss); 1493 flush_cache_page(vma, address, pte_pfn(orig_pte)); 1494 entry = mk_pte(new_page, vma->vm_page_prot); 1495 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1496 ptep_establish(vma, address, page_table, entry); 1497 update_mmu_cache(vma, address, entry); 1498 lazy_mmu_prot_update(entry); 1499 lru_cache_add_active(new_page); 1500 page_add_anon_rmap(new_page, vma, address); 1501 1502 /* Free the old page.. */ 1503 new_page = old_page; 1504 ret |= VM_FAULT_WRITE; 1505 } 1506 if (new_page) 1507 page_cache_release(new_page); 1508 if (old_page) 1509 page_cache_release(old_page); 1510unlock: 1511 pte_unmap_unlock(page_table, ptl); 1512 return ret; 1513oom: 1514 if (old_page) 1515 page_cache_release(old_page); 1516 return VM_FAULT_OOM; 1517} 1518 1519/* 1520 * Helper functions for unmap_mapping_range(). 1521 * 1522 * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __ 1523 * 1524 * We have to restart searching the prio_tree whenever we drop the lock, 1525 * since the iterator is only valid while the lock is held, and anyway 1526 * a later vma might be split and reinserted earlier while lock dropped. 1527 * 1528 * The list of nonlinear vmas could be handled more efficiently, using 1529 * a placeholder, but handle it in the same way until a need is shown. 1530 * It is important to search the prio_tree before nonlinear list: a vma 1531 * may become nonlinear and be shifted from prio_tree to nonlinear list 1532 * while the lock is dropped; but never shifted from list to prio_tree. 1533 * 1534 * In order to make forward progress despite restarting the search, 1535 * vm_truncate_count is used to mark a vma as now dealt with, so we can 1536 * quickly skip it next time around. Since the prio_tree search only 1537 * shows us those vmas affected by unmapping the range in question, we 1538 * can't efficiently keep all vmas in step with mapping->truncate_count: 1539 * so instead reset them all whenever it wraps back to 0 (then go to 1). 1540 * mapping->truncate_count and vma->vm_truncate_count are protected by 1541 * i_mmap_lock. 1542 * 1543 * In order to make forward progress despite repeatedly restarting some 1544 * large vma, note the restart_addr from unmap_vmas when it breaks out: 1545 * and restart from that address when we reach that vma again. It might 1546 * have been split or merged, shrunk or extended, but never shifted: so 1547 * restart_addr remains valid so long as it remains in the vma's range. 1548 * unmap_mapping_range forces truncate_count to leap over page-aligned 1549 * values so we can save vma's restart_addr in its truncate_count field. 1550 */ 1551#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK)) 1552 1553static void reset_vma_truncate_counts(struct address_space *mapping) 1554{ 1555 struct vm_area_struct *vma; 1556 struct prio_tree_iter iter; 1557 1558 vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX) 1559 vma->vm_truncate_count = 0; 1560 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list) 1561 vma->vm_truncate_count = 0; 1562} 1563 1564static int unmap_mapping_range_vma(struct vm_area_struct *vma, 1565 unsigned long start_addr, unsigned long end_addr, 1566 struct zap_details *details) 1567{ 1568 unsigned long restart_addr; 1569 int need_break; 1570 1571again: 1572 restart_addr = vma->vm_truncate_count; 1573 if (is_restart_addr(restart_addr) && start_addr < restart_addr) { 1574 start_addr = restart_addr; 1575 if (start_addr >= end_addr) { 1576 /* Top of vma has been split off since last time */ 1577 vma->vm_truncate_count = details->truncate_count; 1578 return 0; 1579 } 1580 } 1581 1582 restart_addr = zap_page_range(vma, start_addr, 1583 end_addr - start_addr, details); 1584 need_break = need_resched() || 1585 need_lockbreak(details->i_mmap_lock); 1586 1587 if (restart_addr >= end_addr) { 1588 /* We have now completed this vma: mark it so */ 1589 vma->vm_truncate_count = details->truncate_count; 1590 if (!need_break) 1591 return 0; 1592 } else { 1593 /* Note restart_addr in vma's truncate_count field */ 1594 vma->vm_truncate_count = restart_addr; 1595 if (!need_break) 1596 goto again; 1597 } 1598 1599 spin_unlock(details->i_mmap_lock); 1600 cond_resched(); 1601 spin_lock(details->i_mmap_lock); 1602 return -EINTR; 1603} 1604 1605static inline void unmap_mapping_range_tree(struct prio_tree_root *root, 1606 struct zap_details *details) 1607{ 1608 struct vm_area_struct *vma; 1609 struct prio_tree_iter iter; 1610 pgoff_t vba, vea, zba, zea; 1611 1612restart: 1613 vma_prio_tree_foreach(vma, &iter, root, 1614 details->first_index, details->last_index) { 1615 /* Skip quickly over those we have already dealt with */ 1616 if (vma->vm_truncate_count == details->truncate_count) 1617 continue; 1618 1619 vba = vma->vm_pgoff; 1620 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1; 1621 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ 1622 zba = details->first_index; 1623 if (zba < vba) 1624 zba = vba; 1625 zea = details->last_index; 1626 if (zea > vea) 1627 zea = vea; 1628 1629 if (unmap_mapping_range_vma(vma, 1630 ((zba - vba) << PAGE_SHIFT) + vma->vm_start, 1631 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start, 1632 details) < 0) 1633 goto restart; 1634 } 1635} 1636 1637static inline void unmap_mapping_range_list(struct list_head *head, 1638 struct zap_details *details) 1639{ 1640 struct vm_area_struct *vma; 1641 1642 /* 1643 * In nonlinear VMAs there is no correspondence between virtual address 1644 * offset and file offset. So we must perform an exhaustive search 1645 * across *all* the pages in each nonlinear VMA, not just the pages 1646 * whose virtual address lies outside the file truncation point. 1647 */ 1648restart: 1649 list_for_each_entry(vma, head, shared.vm_set.list) { 1650 /* Skip quickly over those we have already dealt with */ 1651 if (vma->vm_truncate_count == details->truncate_count) 1652 continue; 1653 details->nonlinear_vma = vma; 1654 if (unmap_mapping_range_vma(vma, vma->vm_start, 1655 vma->vm_end, details) < 0) 1656 goto restart; 1657 } 1658} 1659 1660/** 1661 * unmap_mapping_range - unmap the portion of all mmaps 1662 * in the specified address_space corresponding to the specified 1663 * page range in the underlying file. 1664 * @mapping: the address space containing mmaps to be unmapped. 1665 * @holebegin: byte in first page to unmap, relative to the start of 1666 * the underlying file. This will be rounded down to a PAGE_SIZE 1667 * boundary. Note that this is different from vmtruncate(), which 1668 * must keep the partial page. In contrast, we must get rid of 1669 * partial pages. 1670 * @holelen: size of prospective hole in bytes. This will be rounded 1671 * up to a PAGE_SIZE boundary. A holelen of zero truncates to the 1672 * end of the file. 1673 * @even_cows: 1 when truncating a file, unmap even private COWed pages; 1674 * but 0 when invalidating pagecache, don't throw away private data. 1675 */ 1676void unmap_mapping_range(struct address_space *mapping, 1677 loff_t const holebegin, loff_t const holelen, int even_cows) 1678{ 1679 struct zap_details details; 1680 pgoff_t hba = holebegin >> PAGE_SHIFT; 1681 pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 1682 1683 /* Check for overflow. */ 1684 if (sizeof(holelen) > sizeof(hlen)) { 1685 long long holeend = 1686 (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT; 1687 if (holeend & ~(long long)ULONG_MAX) 1688 hlen = ULONG_MAX - hba + 1; 1689 } 1690 1691 details.check_mapping = even_cows? NULL: mapping; 1692 details.nonlinear_vma = NULL; 1693 details.first_index = hba; 1694 details.last_index = hba + hlen - 1; 1695 if (details.last_index < details.first_index) 1696 details.last_index = ULONG_MAX; 1697 details.i_mmap_lock = &mapping->i_mmap_lock; 1698 1699 spin_lock(&mapping->i_mmap_lock); 1700 1701 /* serialize i_size write against truncate_count write */ 1702 smp_wmb(); 1703 /* Protect against page faults, and endless unmapping loops */ 1704 mapping->truncate_count++; 1705 /* 1706 * For archs where spin_lock has inclusive semantics like ia64 1707 * this smp_mb() will prevent to read pagetable contents 1708 * before the truncate_count increment is visible to 1709 * other cpus. 1710 */ 1711 smp_mb(); 1712 if (unlikely(is_restart_addr(mapping->truncate_count))) { 1713 if (mapping->truncate_count == 0) 1714 reset_vma_truncate_counts(mapping); 1715 mapping->truncate_count++; 1716 } 1717 details.truncate_count = mapping->truncate_count; 1718 1719 if (unlikely(!prio_tree_empty(&mapping->i_mmap))) 1720 unmap_mapping_range_tree(&mapping->i_mmap, &details); 1721 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 1722 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 1723 spin_unlock(&mapping->i_mmap_lock); 1724} 1725EXPORT_SYMBOL(unmap_mapping_range); 1726 1727/* 1728 * Handle all mappings that got truncated by a "truncate()" 1729 * system call. 1730 * 1731 * NOTE! We have to be ready to update the memory sharing 1732 * between the file and the memory map for a potential last 1733 * incomplete page. Ugly, but necessary. 1734 */ 1735int vmtruncate(struct inode * inode, loff_t offset) 1736{ 1737 struct address_space *mapping = inode->i_mapping; 1738 unsigned long limit; 1739 1740 if (inode->i_size < offset) 1741 goto do_expand; 1742 /* 1743 * truncation of in-use swapfiles is disallowed - it would cause 1744 * subsequent swapout to scribble on the now-freed blocks. 1745 */ 1746 if (IS_SWAPFILE(inode)) 1747 goto out_busy; 1748 i_size_write(inode, offset); 1749 unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1); 1750 truncate_inode_pages(mapping, offset); 1751 goto out_truncate; 1752 1753do_expand: 1754 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; 1755 if (limit != RLIM_INFINITY && offset > limit) 1756 goto out_sig; 1757 if (offset > inode->i_sb->s_maxbytes) 1758 goto out_big; 1759 i_size_write(inode, offset); 1760 1761out_truncate: 1762 if (inode->i_op && inode->i_op->truncate) 1763 inode->i_op->truncate(inode); 1764 return 0; 1765out_sig: 1766 send_sig(SIGXFSZ, current, 0); 1767out_big: 1768 return -EFBIG; 1769out_busy: 1770 return -ETXTBSY; 1771} 1772 1773EXPORT_SYMBOL(vmtruncate); 1774 1775/* 1776 * Primitive swap readahead code. We simply read an aligned block of 1777 * (1 << page_cluster) entries in the swap area. This method is chosen 1778 * because it doesn't cost us any seek time. We also make sure to queue 1779 * the 'original' request together with the readahead ones... 1780 * 1781 * This has been extended to use the NUMA policies from the mm triggering 1782 * the readahead. 1783 * 1784 * Caller must hold down_read on the vma->vm_mm if vma is not NULL. 1785 */ 1786void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma) 1787{ 1788#ifdef CONFIG_NUMA 1789 struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL; 1790#endif 1791 int i, num; 1792 struct page *new_page; 1793 unsigned long offset; 1794 1795 /* 1796 * Get the number of handles we should do readahead io to. 1797 */ 1798 num = valid_swaphandles(entry, &offset); 1799 for (i = 0; i < num; offset++, i++) { 1800 /* Ok, do the async read-ahead now */ 1801 new_page = read_swap_cache_async(swp_entry(swp_type(entry), 1802 offset), vma, addr); 1803 if (!new_page) 1804 break; 1805 page_cache_release(new_page); 1806#ifdef CONFIG_NUMA 1807 /* 1808 * Find the next applicable VMA for the NUMA policy. 1809 */ 1810 addr += PAGE_SIZE; 1811 if (addr == 0) 1812 vma = NULL; 1813 if (vma) { 1814 if (addr >= vma->vm_end) { 1815 vma = next_vma; 1816 next_vma = vma ? vma->vm_next : NULL; 1817 } 1818 if (vma && addr < vma->vm_start) 1819 vma = NULL; 1820 } else { 1821 if (next_vma && addr >= next_vma->vm_start) { 1822 vma = next_vma; 1823 next_vma = vma->vm_next; 1824 } 1825 } 1826#endif 1827 } 1828 lru_add_drain(); /* Push any new pages onto the LRU now */ 1829} 1830 1831/* 1832 * We enter with non-exclusive mmap_sem (to exclude vma changes, 1833 * but allow concurrent faults), and pte mapped but not yet locked. 1834 * We return with mmap_sem still held, but pte unmapped and unlocked. 1835 */ 1836static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, 1837 unsigned long address, pte_t *page_table, pmd_t *pmd, 1838 int write_access, pte_t orig_pte) 1839{ 1840 spinlock_t *ptl; 1841 struct page *page; 1842 swp_entry_t entry; 1843 pte_t pte; 1844 int ret = VM_FAULT_MINOR; 1845 1846 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 1847 goto out; 1848 1849 entry = pte_to_swp_entry(orig_pte); 1850 page = lookup_swap_cache(entry); 1851 if (!page) { 1852 swapin_readahead(entry, address, vma); 1853 page = read_swap_cache_async(entry, vma, address); 1854 if (!page) { 1855 /* 1856 * Back out if somebody else faulted in this pte 1857 * while we released the pte lock. 1858 */ 1859 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 1860 if (likely(pte_same(*page_table, orig_pte))) 1861 ret = VM_FAULT_OOM; 1862 goto unlock; 1863 } 1864 1865 /* Had to read the page from swap area: Major fault */ 1866 ret = VM_FAULT_MAJOR; 1867 inc_page_state(pgmajfault); 1868 grab_swap_token(); 1869 } 1870 1871 mark_page_accessed(page); 1872 lock_page(page); 1873 1874 /* 1875 * Back out if somebody else already faulted in this pte. 1876 */ 1877 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 1878 if (unlikely(!pte_same(*page_table, orig_pte))) 1879 goto out_nomap; 1880 1881 if (unlikely(!PageUptodate(page))) { 1882 ret = VM_FAULT_SIGBUS; 1883 goto out_nomap; 1884 } 1885 1886 /* The page isn't present yet, go ahead with the fault. */ 1887 1888 inc_mm_counter(mm, anon_rss); 1889 pte = mk_pte(page, vma->vm_page_prot); 1890 if (write_access && can_share_swap_page(page)) { 1891 pte = maybe_mkwrite(pte_mkdirty(pte), vma); 1892 write_access = 0; 1893 } 1894 1895 flush_icache_page(vma, page); 1896 set_pte_at(mm, address, page_table, pte); 1897 page_add_anon_rmap(page, vma, address); 1898 1899 swap_free(entry); 1900 if (vm_swap_full()) 1901 remove_exclusive_swap_page(page); 1902 unlock_page(page); 1903 1904 if (write_access) { 1905 if (do_wp_page(mm, vma, address, 1906 page_table, pmd, ptl, pte) == VM_FAULT_OOM) 1907 ret = VM_FAULT_OOM; 1908 goto out; 1909 } 1910 1911 /* No need to invalidate - it was non-present before */ 1912 update_mmu_cache(vma, address, pte); 1913 lazy_mmu_prot_update(pte); 1914unlock: 1915 pte_unmap_unlock(page_table, ptl); 1916out: 1917 return ret; 1918out_nomap: 1919 pte_unmap_unlock(page_table, ptl); 1920 unlock_page(page); 1921 page_cache_release(page); 1922 return ret; 1923} 1924 1925/* 1926 * We enter with non-exclusive mmap_sem (to exclude vma changes, 1927 * but allow concurrent faults), and pte mapped but not yet locked. 1928 * We return with mmap_sem still held, but pte unmapped and unlocked. 1929 */ 1930static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, 1931 unsigned long address, pte_t *page_table, pmd_t *pmd, 1932 int write_access) 1933{ 1934 struct page *page; 1935 spinlock_t *ptl; 1936 pte_t entry; 1937 1938 if (write_access) { 1939 /* Allocate our own private page. */ 1940 pte_unmap(page_table); 1941 1942 if (unlikely(anon_vma_prepare(vma))) 1943 goto oom; 1944 page = alloc_zeroed_user_highpage(vma, address); 1945 if (!page) 1946 goto oom; 1947 1948 entry = mk_pte(page, vma->vm_page_prot); 1949 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 1950 1951 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 1952 if (!pte_none(*page_table)) 1953 goto release; 1954 inc_mm_counter(mm, anon_rss); 1955 lru_cache_add_active(page); 1956 SetPageReferenced(page); 1957 page_add_anon_rmap(page, vma, address); 1958 } else { 1959 /* Map the ZERO_PAGE - vm_page_prot is readonly */ 1960 page = ZERO_PAGE(address); 1961 page_cache_get(page); 1962 entry = mk_pte(page, vma->vm_page_prot); 1963 1964 ptl = pte_lockptr(mm, pmd); 1965 spin_lock(ptl); 1966 if (!pte_none(*page_table)) 1967 goto release; 1968 inc_mm_counter(mm, file_rss); 1969 page_add_file_rmap(page); 1970 } 1971 1972 set_pte_at(mm, address, page_table, entry); 1973 1974 /* No need to invalidate - it was non-present before */ 1975 update_mmu_cache(vma, address, entry); 1976 lazy_mmu_prot_update(entry); 1977unlock: 1978 pte_unmap_unlock(page_table, ptl); 1979 return VM_FAULT_MINOR; 1980release: 1981 page_cache_release(page); 1982 goto unlock; 1983oom: 1984 return VM_FAULT_OOM; 1985} 1986 1987/* 1988 * do_no_page() tries to create a new page mapping. It aggressively 1989 * tries to share with existing pages, but makes a separate copy if 1990 * the "write_access" parameter is true in order to avoid the next 1991 * page fault. 1992 * 1993 * As this is called only for pages that do not currently exist, we 1994 * do not need to flush old virtual caches or the TLB. 1995 * 1996 * We enter with non-exclusive mmap_sem (to exclude vma changes, 1997 * but allow concurrent faults), and pte mapped but not yet locked. 1998 * We return with mmap_sem still held, but pte unmapped and unlocked. 1999 */ 2000static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma, 2001 unsigned long address, pte_t *page_table, pmd_t *pmd, 2002 int write_access) 2003{ 2004 spinlock_t *ptl; 2005 struct page *new_page; 2006 struct address_space *mapping = NULL; 2007 pte_t entry; 2008 unsigned int sequence = 0; 2009 int ret = VM_FAULT_MINOR; 2010 int anon = 0; 2011 2012 pte_unmap(page_table); 2013 BUG_ON(vma->vm_flags & VM_PFNMAP); 2014 2015 if (vma->vm_file) { 2016 mapping = vma->vm_file->f_mapping; 2017 sequence = mapping->truncate_count; 2018 smp_rmb(); /* serializes i_size against truncate_count */ 2019 } 2020retry: 2021 new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret); 2022 /* 2023 * No smp_rmb is needed here as long as there's a full 2024 * spin_lock/unlock sequence inside the ->nopage callback 2025 * (for the pagecache lookup) that acts as an implicit 2026 * smp_mb() and prevents the i_size read to happen 2027 * after the next truncate_count read. 2028 */ 2029 2030 /* no page was available -- either SIGBUS or OOM */ 2031 if (new_page == NOPAGE_SIGBUS) 2032 return VM_FAULT_SIGBUS; 2033 if (new_page == NOPAGE_OOM) 2034 return VM_FAULT_OOM; 2035 2036 /* 2037 * Should we do an early C-O-W break? 2038 */ 2039 if (write_access && !(vma->vm_flags & VM_SHARED)) { 2040 struct page *page; 2041 2042 if (unlikely(anon_vma_prepare(vma))) 2043 goto oom; 2044 page = alloc_page_vma(GFP_HIGHUSER, vma, address); 2045 if (!page) 2046 goto oom; 2047 copy_user_highpage(page, new_page, address); 2048 page_cache_release(new_page); 2049 new_page = page; 2050 anon = 1; 2051 } 2052 2053 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2054 /* 2055 * For a file-backed vma, someone could have truncated or otherwise 2056 * invalidated this page. If unmap_mapping_range got called, 2057 * retry getting the page. 2058 */ 2059 if (mapping && unlikely(sequence != mapping->truncate_count)) { 2060 pte_unmap_unlock(page_table, ptl); 2061 page_cache_release(new_page); 2062 cond_resched(); 2063 sequence = mapping->truncate_count; 2064 smp_rmb(); 2065 goto retry; 2066 } 2067 2068 /* 2069 * This silly early PAGE_DIRTY setting removes a race 2070 * due to the bad i386 page protection. But it's valid 2071 * for other architectures too. 2072 * 2073 * Note that if write_access is true, we either now have 2074 * an exclusive copy of the page, or this is a shared mapping, 2075 * so we can make it writable and dirty to avoid having to 2076 * handle that later. 2077 */ 2078 /* Only go through if we didn't race with anybody else... */ 2079 if (pte_none(*page_table)) { 2080 flush_icache_page(vma, new_page); 2081 entry = mk_pte(new_page, vma->vm_page_prot); 2082 if (write_access) 2083 entry = maybe_mkwrite(pte_mkdirty(entry), vma); 2084 set_pte_at(mm, address, page_table, entry); 2085 if (anon) { 2086 inc_mm_counter(mm, anon_rss); 2087 lru_cache_add_active(new_page); 2088 page_add_anon_rmap(new_page, vma, address); 2089 } else { 2090 inc_mm_counter(mm, file_rss); 2091 page_add_file_rmap(new_page); 2092 } 2093 } else { 2094 /* One of our sibling threads was faster, back out. */ 2095 page_cache_release(new_page); 2096 goto unlock; 2097 } 2098 2099 /* no need to invalidate: a not-present page shouldn't be cached */ 2100 update_mmu_cache(vma, address, entry); 2101 lazy_mmu_prot_update(entry); 2102unlock: 2103 pte_unmap_unlock(page_table, ptl); 2104 return ret; 2105oom: 2106 page_cache_release(new_page); 2107 return VM_FAULT_OOM; 2108} 2109 2110/* 2111 * Fault of a previously existing named mapping. Repopulate the pte 2112 * from the encoded file_pte if possible. This enables swappable 2113 * nonlinear vmas. 2114 * 2115 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2116 * but allow concurrent faults), and pte mapped but not yet locked. 2117 * We return with mmap_sem still held, but pte unmapped and unlocked. 2118 */ 2119static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma, 2120 unsigned long address, pte_t *page_table, pmd_t *pmd, 2121 int write_access, pte_t orig_pte) 2122{ 2123 pgoff_t pgoff; 2124 int err; 2125 2126 if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) 2127 return VM_FAULT_MINOR; 2128 2129 if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) { 2130 /* 2131 * Page table corrupted: show pte and kill process. 2132 */ 2133 print_bad_pte(vma, orig_pte, address); 2134 return VM_FAULT_OOM; 2135 } 2136 /* We can then assume vm->vm_ops && vma->vm_ops->populate */ 2137 2138 pgoff = pte_to_pgoff(orig_pte); 2139 err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE, 2140 vma->vm_page_prot, pgoff, 0); 2141 if (err == -ENOMEM) 2142 return VM_FAULT_OOM; 2143 if (err) 2144 return VM_FAULT_SIGBUS; 2145 return VM_FAULT_MAJOR; 2146} 2147 2148/* 2149 * These routines also need to handle stuff like marking pages dirty 2150 * and/or accessed for architectures that don't do it in hardware (most 2151 * RISC architectures). The early dirtying is also good on the i386. 2152 * 2153 * There is also a hook called "update_mmu_cache()" that architectures 2154 * with external mmu caches can use to update those (ie the Sparc or 2155 * PowerPC hashed page tables that act as extended TLBs). 2156 * 2157 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2158 * but allow concurrent faults), and pte mapped but not yet locked. 2159 * We return with mmap_sem still held, but pte unmapped and unlocked. 2160 */ 2161static inline int handle_pte_fault(struct mm_struct *mm, 2162 struct vm_area_struct *vma, unsigned long address, 2163 pte_t *pte, pmd_t *pmd, int write_access) 2164{ 2165 pte_t entry; 2166 pte_t old_entry; 2167 spinlock_t *ptl; 2168 2169 old_entry = entry = *pte; 2170 if (!pte_present(entry)) { 2171 if (pte_none(entry)) { 2172 if (!vma->vm_ops || !vma->vm_ops->nopage) 2173 return do_anonymous_page(mm, vma, address, 2174 pte, pmd, write_access); 2175 return do_no_page(mm, vma, address, 2176 pte, pmd, write_access); 2177 } 2178 if (pte_file(entry)) 2179 return do_file_page(mm, vma, address, 2180 pte, pmd, write_access, entry); 2181 return do_swap_page(mm, vma, address, 2182 pte, pmd, write_access, entry); 2183 } 2184 2185 ptl = pte_lockptr(mm, pmd); 2186 spin_lock(ptl); 2187 if (unlikely(!pte_same(*pte, entry))) 2188 goto unlock; 2189 if (write_access) { 2190 if (!pte_write(entry)) 2191 return do_wp_page(mm, vma, address, 2192 pte, pmd, ptl, entry); 2193 entry = pte_mkdirty(entry); 2194 } 2195 entry = pte_mkyoung(entry); 2196 if (!pte_same(old_entry, entry)) { 2197 ptep_set_access_flags(vma, address, pte, entry, write_access); 2198 update_mmu_cache(vma, address, entry); 2199 lazy_mmu_prot_update(entry); 2200 } else { 2201 /* 2202 * This is needed only for protection faults but the arch code 2203 * is not yet telling us if this is a protection fault or not. 2204 * This still avoids useless tlb flushes for .text page faults 2205 * with threads. 2206 */ 2207 if (write_access) 2208 flush_tlb_page(vma, address); 2209 } 2210unlock: 2211 pte_unmap_unlock(pte, ptl); 2212 return VM_FAULT_MINOR; 2213} 2214 2215/* 2216 * By the time we get here, we already hold the mm semaphore 2217 */ 2218int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, 2219 unsigned long address, int write_access) 2220{ 2221 pgd_t *pgd; 2222 pud_t *pud; 2223 pmd_t *pmd; 2224 pte_t *pte; 2225 2226 __set_current_state(TASK_RUNNING); 2227 2228 inc_page_state(pgfault); 2229 2230 if (unlikely(is_vm_hugetlb_page(vma))) 2231 return hugetlb_fault(mm, vma, address, write_access); 2232 2233 pgd = pgd_offset(mm, address); 2234 pud = pud_alloc(mm, pgd, address); 2235 if (!pud) 2236 return VM_FAULT_OOM; 2237 pmd = pmd_alloc(mm, pud, address); 2238 if (!pmd) 2239 return VM_FAULT_OOM; 2240 pte = pte_alloc_map(mm, pmd, address); 2241 if (!pte) 2242 return VM_FAULT_OOM; 2243 2244 return handle_pte_fault(mm, vma, address, pte, pmd, write_access); 2245} 2246 2247#ifndef __PAGETABLE_PUD_FOLDED 2248/* 2249 * Allocate page upper directory. 2250 * We've already handled the fast-path in-line. 2251 */ 2252int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 2253{ 2254 pud_t *new = pud_alloc_one(mm, address); 2255 if (!new) 2256 return -ENOMEM; 2257 2258 spin_lock(&mm->page_table_lock); 2259 if (pgd_present(*pgd)) /* Another has populated it */ 2260 pud_free(new); 2261 else 2262 pgd_populate(mm, pgd, new); 2263 spin_unlock(&mm->page_table_lock); 2264 return 0; 2265} 2266#else 2267/* Workaround for gcc 2.96 */ 2268int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) 2269{ 2270 return 0; 2271} 2272#endif /* __PAGETABLE_PUD_FOLDED */ 2273 2274#ifndef __PAGETABLE_PMD_FOLDED 2275/* 2276 * Allocate page middle directory. 2277 * We've already handled the fast-path in-line. 2278 */ 2279int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 2280{ 2281 pmd_t *new = pmd_alloc_one(mm, address); 2282 if (!new) 2283 return -ENOMEM; 2284 2285 spin_lock(&mm->page_table_lock); 2286#ifndef __ARCH_HAS_4LEVEL_HACK 2287 if (pud_present(*pud)) /* Another has populated it */ 2288 pmd_free(new); 2289 else 2290 pud_populate(mm, pud, new); 2291#else 2292 if (pgd_present(*pud)) /* Another has populated it */ 2293 pmd_free(new); 2294 else 2295 pgd_populate(mm, pud, new); 2296#endif /* __ARCH_HAS_4LEVEL_HACK */ 2297 spin_unlock(&mm->page_table_lock); 2298 return 0; 2299} 2300#else 2301/* Workaround for gcc 2.96 */ 2302int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) 2303{ 2304 return 0; 2305} 2306#endif /* __PAGETABLE_PMD_FOLDED */ 2307 2308int make_pages_present(unsigned long addr, unsigned long end) 2309{ 2310 int ret, len, write; 2311 struct vm_area_struct * vma; 2312 2313 vma = find_vma(current->mm, addr); 2314 if (!vma) 2315 return -1; 2316 write = (vma->vm_flags & VM_WRITE) != 0; 2317 if (addr >= end) 2318 BUG(); 2319 if (end > vma->vm_end) 2320 BUG(); 2321 len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE; 2322 ret = get_user_pages(current, current->mm, addr, 2323 len, write, 0, NULL, NULL); 2324 if (ret < 0) 2325 return ret; 2326 return ret == len ? 0 : -1; 2327} 2328 2329/* 2330 * Map a vmalloc()-space virtual address to the physical page. 2331 */ 2332struct page * vmalloc_to_page(void * vmalloc_addr) 2333{ 2334 unsigned long addr = (unsigned long) vmalloc_addr; 2335 struct page *page = NULL; 2336 pgd_t *pgd = pgd_offset_k(addr); 2337 pud_t *pud; 2338 pmd_t *pmd; 2339 pte_t *ptep, pte; 2340 2341 if (!pgd_none(*pgd)) { 2342 pud = pud_offset(pgd, addr); 2343 if (!pud_none(*pud)) { 2344 pmd = pmd_offset(pud, addr); 2345 if (!pmd_none(*pmd)) { 2346 ptep = pte_offset_map(pmd, addr); 2347 pte = *ptep; 2348 if (pte_present(pte)) 2349 page = pte_page(pte); 2350 pte_unmap(ptep); 2351 } 2352 } 2353 } 2354 return page; 2355} 2356 2357EXPORT_SYMBOL(vmalloc_to_page); 2358 2359/* 2360 * Map a vmalloc()-space virtual address to the physical page frame number. 2361 */ 2362unsigned long vmalloc_to_pfn(void * vmalloc_addr) 2363{ 2364 return page_to_pfn(vmalloc_to_page(vmalloc_addr)); 2365} 2366 2367EXPORT_SYMBOL(vmalloc_to_pfn); 2368 2369#if !defined(__HAVE_ARCH_GATE_AREA) 2370 2371#if defined(AT_SYSINFO_EHDR) 2372static struct vm_area_struct gate_vma; 2373 2374static int __init gate_vma_init(void) 2375{ 2376 gate_vma.vm_mm = NULL; 2377 gate_vma.vm_start = FIXADDR_USER_START; 2378 gate_vma.vm_end = FIXADDR_USER_END; 2379 gate_vma.vm_page_prot = PAGE_READONLY; 2380 gate_vma.vm_flags = 0; 2381 return 0; 2382} 2383__initcall(gate_vma_init); 2384#endif 2385 2386struct vm_area_struct *get_gate_vma(struct task_struct *tsk) 2387{ 2388#ifdef AT_SYSINFO_EHDR 2389 return &gate_vma; 2390#else 2391 return NULL; 2392#endif 2393} 2394 2395int in_gate_area_no_task(unsigned long addr) 2396{ 2397#ifdef AT_SYSINFO_EHDR 2398 if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END)) 2399 return 1; 2400#endif 2401 return 0; 2402} 2403 2404#endif /* __HAVE_ARCH_GATE_AREA */ 2405