hugetlb.c revision b45b5bd65f668a665db40d093e4e1fe563533608
/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, reserved_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid = numa_node_id();
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = (*z)->zone_pgdat->node_id;
		if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

static int alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = (nid + 1) % num_online_nodes();
	if (page) {
		page[1].lru.next = (void *)free_huge_page;	/* dtor */
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}
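/*
 * Illustrative note (not from the original file): the pointer stashed in
 * page[1].lru.next by alloc_fresh_huge_page() is the compound-page
 * destructor.  When the last reference to the hugepage is dropped, the
 * page allocator's compound-page teardown does, roughly:
 *
 *	void (*dtor)(struct page *);
 *
 *	dtor = (void (*)(struct page *))page[1].lru.next;
 *	if (dtor)
 *		(*dtor)(page);
 *
 * which is how the put_page() in alloc_fresh_huge_page() lands in
 * free_huge_page() above, re-queueing the hugepage on hugepage_freelists[]
 * instead of handing it back to the buddy allocator.
 */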
struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page;
	int use_reserve = 0;
	unsigned long idx;

	spin_lock(&hugetlb_lock);

	if (vma->vm_flags & VM_MAYSHARE) {

		/* idx = radix tree index, i.e. offset into file in
		 * HPAGE_SIZE units */
		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

		/* The hugetlbfs specific inode info stores the number
		 * of "guaranteed available" (huge) pages.  That is,
		 * the first 'prereserved_hpages' pages of the inode
		 * are either already instantiated, or have been
		 * pre-reserved (by hugetlb_reserve_for_inode()).  Here
		 * we're in the process of instantiating the page, so
		 * we use this to determine whether to draw from the
		 * pre-reserved pool or the truly free pool. */
		if (idx < HUGETLBFS_I(inode)->prereserved_hpages)
			use_reserve = 1;
	}

	if (!use_reserve) {
		if (free_huge_pages <= reserved_huge_pages)
			goto fail;
	} else {
		BUG_ON(reserved_huge_pages == 0);
		reserved_huge_pages--;
	}

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

 fail:
	WARN_ON(use_reserve);	/* reserved allocations shouldn't fail */
	spin_unlock(&hugetlb_lock);
	return NULL;
}

/* hugetlb_extend_reservation()
 *
 * Ensure that at least 'atleast' hugepages are, and will remain,
 * available to instantiate the first 'atleast' pages of the given
 * inode.  If the inode doesn't already have this many pages reserved
 * or instantiated, set aside some hugepages in the reserved pool to
 * satisfy later faults (or fail now if there aren't enough, rather
 * than getting the SIGBUS later).
 */
int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
			       unsigned long atleast)
{
	struct inode *inode = &info->vfs_inode;
	unsigned long change_in_reserve = 0;
	int ret = 0;

	spin_lock(&hugetlb_lock);
	read_lock_irq(&inode->i_mapping->tree_lock);

	if (info->prereserved_hpages >= atleast)
		goto out;

	/* Because we always call this on shared mappings, none of the
	 * pages beyond info->prereserved_hpages can have been
	 * instantiated, so we need to reserve all of them now. */
	change_in_reserve = atleast - info->prereserved_hpages;

	if ((reserved_huge_pages + change_in_reserve) > free_huge_pages) {
		ret = -ENOMEM;
		goto out;
	}

	reserved_huge_pages += change_in_reserve;
	info->prereserved_hpages = atleast;

 out:
	read_unlock_irq(&inode->i_mapping->tree_lock);
	spin_unlock(&hugetlb_lock);

	return ret;
}
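#if 0
/* A minimal sketch (not part of this file) of how a hugetlbfs-style
 * caller might use the reservation API above: reserve at mmap() time so
 * that a shortage shows up as an mmap() failure instead of a later
 * SIGBUS.  hugetlb_file_mmap() and its shape are hypothetical. */
static int hugetlb_file_mmap(struct inode *inode, struct vm_area_struct *vma)
{
	/* Highest hugepage index the mapping can touch, in HPAGE_SIZE
	 * units, mirroring the idx arithmetic in alloc_huge_page(). */
	unsigned long atleast = (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT))
		+ ((vma->vm_end - vma->vm_start) >> HPAGE_SHIFT);

	return hugetlb_extend_reservation(HUGETLBFS_I(inode), atleast);
}
#endif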
/* hugetlb_truncate_reservation()
 *
 * This returns pages reserved for the given inode to the general free
 * hugepage pool.  If the inode has any pages prereserved, but not
 * instantiated, beyond offset (atmost << HPAGE_SHIFT), then release
 * them.
 */
void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
				  unsigned long atmost)
{
	struct inode *inode = &info->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long idx;
	unsigned long change_in_reserve = 0;
	struct page *page;

	spin_lock(&hugetlb_lock);
	read_lock_irq(&inode->i_mapping->tree_lock);

	if (info->prereserved_hpages <= atmost)
		goto out;

	/* Count pages which were reserved, but not instantiated, and
	 * which we can now release. */
	for (idx = atmost; idx < info->prereserved_hpages; idx++) {
		page = radix_tree_lookup(&mapping->page_tree, idx);
		if (!page)
			/* Pages which are already instantiated can't
			 * be unreserved (and in fact have already
			 * been removed from the reserved pool) */
			change_in_reserve++;
	}

	BUG_ON(reserved_huge_pages < change_in_reserve);
	reserved_huge_pages -= change_in_reserve;
	info->prereserved_hpages = atmost;

 out:
	read_unlock_irq(&inode->i_mapping->tree_lock);
	spin_unlock(&hugetlb_lock);
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

#ifdef CONFIG_SYSCTL
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i, nid;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			nid = page_zone(page)->zone_pgdat->node_id;
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			reserved_huge_pages,
			HPAGE_SIZE/1024);
}
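/*
 * Example of the /proc/meminfo block emitted above, assuming 4MB
 * hugepages with 10 pages in the pool, 2 instantiated and 3 reserved
 * (illustrative numbers only):
 *
 *	HugePages_Total:    10
 *	HugePages_Free:      8
 *	HugePages_Rsvd:      3
 *	Hugepagesize:     4096 kB
 *
 * Note that reserved pages are still counted as free; only
 * instantiation removes a page from HugePages_Free.
 */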
"Node %d HugePages_Total: %5u\n" 372 "Node %d HugePages_Free: %5u\n", 373 nid, nr_huge_pages_node[nid], 374 nid, free_huge_pages_node[nid]); 375} 376 377/* Return the number pages of memory we physically have, in PAGE_SIZE units. */ 378unsigned long hugetlb_total_pages(void) 379{ 380 return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE); 381} 382 383/* 384 * We cannot handle pagefaults against hugetlb pages at all. They cause 385 * handle_mm_fault() to try to instantiate regular-sized pages in the 386 * hugegpage VMA. do_page_fault() is supposed to trap this, so BUG is we get 387 * this far. 388 */ 389static struct page *hugetlb_nopage(struct vm_area_struct *vma, 390 unsigned long address, int *unused) 391{ 392 BUG(); 393 return NULL; 394} 395 396struct vm_operations_struct hugetlb_vm_ops = { 397 .nopage = hugetlb_nopage, 398}; 399 400static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, 401 int writable) 402{ 403 pte_t entry; 404 405 if (writable) { 406 entry = 407 pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); 408 } else { 409 entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot)); 410 } 411 entry = pte_mkyoung(entry); 412 entry = pte_mkhuge(entry); 413 414 return entry; 415} 416 417static void set_huge_ptep_writable(struct vm_area_struct *vma, 418 unsigned long address, pte_t *ptep) 419{ 420 pte_t entry; 421 422 entry = pte_mkwrite(pte_mkdirty(*ptep)); 423 ptep_set_access_flags(vma, address, ptep, entry, 1); 424 update_mmu_cache(vma, address, entry); 425 lazy_mmu_prot_update(entry); 426} 427 428 429int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 430 struct vm_area_struct *vma) 431{ 432 pte_t *src_pte, *dst_pte, entry; 433 struct page *ptepage; 434 unsigned long addr; 435 int cow; 436 437 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; 438 439 for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { 440 src_pte = huge_pte_offset(src, addr); 441 if (!src_pte) 442 continue; 443 dst_pte = huge_pte_alloc(dst, addr); 444 if (!dst_pte) 445 goto nomem; 446 spin_lock(&dst->page_table_lock); 447 spin_lock(&src->page_table_lock); 448 if (!pte_none(*src_pte)) { 449 if (cow) 450 ptep_set_wrprotect(src, addr, src_pte); 451 entry = *src_pte; 452 ptepage = pte_page(entry); 453 get_page(ptepage); 454 add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE); 455 set_huge_pte_at(dst, addr, dst_pte, entry); 456 } 457 spin_unlock(&src->page_table_lock); 458 spin_unlock(&dst->page_table_lock); 459 } 460 return 0; 461 462nomem: 463 return -ENOMEM; 464} 465 466void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, 467 unsigned long end) 468{ 469 struct mm_struct *mm = vma->vm_mm; 470 unsigned long address; 471 pte_t *ptep; 472 pte_t pte; 473 struct page *page; 474 475 WARN_ON(!is_vm_hugetlb_page(vma)); 476 BUG_ON(start & ~HPAGE_MASK); 477 BUG_ON(end & ~HPAGE_MASK); 478 479 spin_lock(&mm->page_table_lock); 480 481 /* Update high watermark before we lower rss */ 482 update_hiwater_rss(mm); 483 484 for (address = start; address < end; address += HPAGE_SIZE) { 485 ptep = huge_pte_offset(mm, address); 486 if (!ptep) 487 continue; 488 489 pte = huge_ptep_get_and_clear(mm, address, ptep); 490 if (pte_none(pte)) 491 continue; 492 493 page = pte_page(pte); 494 put_page(page); 495 add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE)); 496 } 497 498 spin_unlock(&mm->page_table_lock); 499 flush_tlb_range(vma, start, end); 500} 501 502static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, 503 
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}
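/*
 * The retry: logic in hugetlb_no_page() above closes a race between two
 * tasks faulting the same index of a shared mapping.  An illustrative
 * interleaving (not from the original file):
 *
 *	task A					task B
 *	find_lock_page() -> NULL
 *						find_lock_page() -> NULL
 *	alloc_huge_page()
 *	add_to_page_cache() -> 0
 *						alloc_huge_page()
 *						add_to_page_cache() -> -EEXIST
 *						put_page(); goto retry;
 *						find_lock_page() -> task A's page
 *
 * The loser frees its freshly allocated hugepage and proceeds with the
 * winner's page from the page cache.
 */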
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long vpfn, vaddr = *position;
	int remainder = *length;

	vpfn = vaddr/PAGE_SIZE;
	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		if (pages) {
			page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];
			get_page(page);
			pages[i] = page;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++vpfn;
		--remainder;
		++i;
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, start, end);
}
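#if 0
/* A minimal userspace sketch (not part of this file) exercising the fault
 * path above.  Assumes hugetlbfs is mounted at /mnt/huge and that hugepages
 * were configured via the hugepages= boot parameter or the sysctl handled
 * in this file; the mount point and 4MB page size are illustrative. */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define LENGTH	(4UL * 1024 * 1024)	/* one hugepage */

int main(void)
{
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
	char *addr;

	if (fd < 0)
		return 1;
	addr = mmap(NULL, LENGTH, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;
	memset(addr, 0, LENGTH);	/* first touch drives hugetlb_no_page() */
	munmap(addr, LENGTH);		/* teardown goes via unmap_hugepage_range() */
	close(fd);
	unlink("/mnt/huge/example");
	return 0;
}
#endif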