Lines matching refs: vma (excerpts from the Linux kernel's mm/hugetlb.c; each entry is prefixed with its line number in that file)

137 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
139 return subpool_inode(file_inode(vma->vm_file));
323 * Convert the address within this vma to the page offset within
327 struct vm_area_struct *vma, unsigned long address)
329 return ((address - vma->vm_start) >> huge_page_shift(h)) +
330 (vma->vm_pgoff >> huge_page_order(h));
333 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
336 return vma_hugecache_offset(hstate_vma(vma), vma, address);
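The computation at source lines 327-330 converts a faulting address into a huge-page-sized offset in the backing file: the byte offset inside the vma is shifted down to huge-page units, and the vma's file offset (vm_pgoff, kept in base-page units) is shifted down by the huge-page order. A minimal standalone model of that arithmetic, assuming 4 KiB base pages and 2 MiB huge pages (the constants and names below are illustrative, not kernel API):

#include <stdio.h>

/* Illustrative constants: 4 KiB base pages, 2 MiB huge pages. */
#define BASE_SHIFT      12UL
#define HUGE_SHIFT      21UL
#define HUGE_ORDER      (HUGE_SHIFT - BASE_SHIFT)

/* Model of the excerpted conversion: intra-vma byte offset in
 * huge-page units, plus the file offset converted from base-page
 * units (vm_pgoff) to huge-page units. */
static unsigned long model_hugecache_offset(unsigned long vm_start,
                                            unsigned long vm_pgoff,
                                            unsigned long address)
{
        return ((address - vm_start) >> HUGE_SHIFT) +
                (vm_pgoff >> HUGE_ORDER);
}

int main(void)
{
        unsigned long vm_start = 0x40000000UL;
        unsigned long vm_pgoff = 512;                    /* 512 * 4 KiB = 2 MiB into the file */
        unsigned long addr = vm_start + 6 * (1UL << 20); /* 6 MiB into the vma */

        /* (6 MiB >> 21) + (512 >> 9) = 3 + 1 = 4 */
        printf("huge page index = %lu\n",
               model_hugecache_offset(vm_start, vm_pgoff, addr));
        return 0;
}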
343 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
347 if (!is_vm_hugetlb_page(vma))
350 hstate = hstate_vma(vma);
363 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
365 return vma_kernel_pagesize(vma);
397 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
399 return (unsigned long)vma->vm_private_data;
402 static void set_vma_private_data(struct vm_area_struct *vma,
405 vma->vm_private_data = (void *)value;
435 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
437 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
438 if (vma->vm_flags & VM_MAYSHARE) {
439 struct address_space *mapping = vma->vm_file->f_mapping;
445 return (struct resv_map *)(get_vma_private_data(vma) &
450 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
452 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
453 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
455 set_vma_private_data(vma, (get_vma_private_data(vma) &
459 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
461 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
462 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
464 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
467 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
469 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
471 return (get_vma_private_data(vma) & flag) != 0;
475 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
477 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
478 if (!(vma->vm_flags & VM_MAYSHARE))
479 vma->vm_private_data = (void *)0;
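The accessors excerpted between source lines 397 and 479 show how private-mapping reservation state is packed into vma->vm_private_data: a pointer to the resv_map in the upper bits, HPAGE_RESV_* flags in the low bits, while shared (VM_MAYSHARE) mappings keep their resv_map with the inode instead. A standalone sketch of that pointer-plus-flags packing, with invented flag names and values (the real mask and flag constants are not shown in the excerpt):

#include <assert.h>

/* Hypothetical bits standing in for HPAGE_RESV_OWNER /
 * HPAGE_RESV_UNMAPPED; the real values are not shown above. */
#define MODEL_RESV_OWNER        (1UL << 0)
#define MODEL_RESV_UNMAPPED     (1UL << 1)
#define MODEL_RESV_MASK         (MODEL_RESV_OWNER | MODEL_RESV_UNMAPPED)

struct model_resv_map { long reserved; };   /* aligned, so the low bits are free */

static unsigned long private_data;          /* models vma->vm_private_data */

static void set_resv_map(struct model_resv_map *map)
{
        /* keep the existing flag bits, replace the pointer bits */
        private_data = (private_data & MODEL_RESV_MASK) | (unsigned long)map;
}

static struct model_resv_map *get_resv_map(void)
{
        /* mask the flag bits off before treating the value as a pointer */
        return (struct model_resv_map *)(private_data & ~MODEL_RESV_MASK);
}

static void set_resv_flag(unsigned long flag)
{
        private_data |= flag;
}

static int is_resv_flag_set(unsigned long flag)
{
        return (private_data & flag) != 0;
}

int main(void)
{
        static struct model_resv_map map;

        set_resv_map(&map);
        set_resv_flag(MODEL_RESV_OWNER);

        assert(get_resv_map() == &map);
        assert(is_resv_flag_set(MODEL_RESV_OWNER));
        assert(!is_resv_flag_set(MODEL_RESV_UNMAPPED));
        return 0;
}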
483 static int vma_has_reserves(struct vm_area_struct *vma, long chg)
485 if (vma->vm_flags & VM_NORESERVE) {
495 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
502 if (vma->vm_flags & VM_MAYSHARE)
509 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
553 struct vm_area_struct *vma,
570 if (!vma_has_reserves(vma, chg) &&
580 zonelist = huge_zonelist(vma, address,
590 if (!vma_has_reserves(vma, chg))
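The branches excerpted from vma_has_reserves() and dequeue_huge_page_vma() (source lines 483 through 590) decide whether a fault may consume a previously reserved page: a VM_NORESERVE mapping only counts as reserved when it is shared and carries no new charge (chg == 0); otherwise a shared (VM_MAYSHARE) mapping always draws on the inode's reserves, and for private mappings only the HPAGE_RESV_OWNER, the task that created the mapping, has reserves. A condensed standalone model of that decision (flag bit values are invented, and the return values of branches not shown in the excerpt are assumed):

#include <stdio.h>

#define MODEL_VM_MAYSHARE       (1UL << 0)      /* invented bit values */
#define MODEL_VM_NORESERVE      (1UL << 1)

/* resv_owner models is_vma_resv_set(vma, HPAGE_RESV_OWNER);
 * chg is the new reservation charge computed for this fault. */
static int model_vma_has_reserves(unsigned long vm_flags, int resv_owner, long chg)
{
        if (vm_flags & MODEL_VM_NORESERVE) {
                /* a shared mapping with nothing new to charge may still
                 * consume a reservation made by another mapper */
                return (vm_flags & MODEL_VM_MAYSHARE) && chg == 0;
        }

        /* shared mappings always use the inode's reserves */
        if (vm_flags & MODEL_VM_MAYSHARE)
                return 1;

        /* private mappings: only the mmap()ing task reserved pages */
        return resv_owner;
}

int main(void)
{
        printf("shared, no charge:     %d\n",
               model_vma_has_reserves(MODEL_VM_MAYSHARE, 0, 0));
        printf("private, not owner:    %d\n",
               model_vma_has_reserves(0, 0, 0));
        printf("noreserve shared, chg: %d\n",
               model_vma_has_reserves(MODEL_VM_NORESERVE | MODEL_VM_MAYSHARE, 0, 1));
        return 0;
}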
1181 * This allocation function is useful in the context where vma is irrelevant.
1322 * Determine if the huge page at addr within the vma has an associated
1332 struct vm_area_struct *vma, unsigned long addr)
1338 resv = vma_resv_map(vma);
1342 idx = vma_hugecache_offset(h, vma, addr);
1345 if (vma->vm_flags & VM_MAYSHARE)
1351 struct vm_area_struct *vma, unsigned long addr)
1356 resv = vma_resv_map(vma);
1360 idx = vma_hugecache_offset(h, vma, addr);
1364 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1367 struct hugepage_subpool *spool = subpool_vma(vma);
1368 struct hstate *h = hstate_vma(vma);
1383 chg = vma_needs_reservation(h, vma, addr);
1395 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1411 vma_commit_reservation(h, vma, addr);
1427 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1430 struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
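alloc_huge_page() (source lines 1364 through 1430) follows a check-then-commit pattern: vma_needs_reservation() reports how many pages the fault would add to the reservation map (0 if the offset is already covered), the page is dequeued with that charge, and only after the allocation succeeds does vma_commit_reservation() record the offset, so failure paths leave the map untouched. A toy model of that ordering over a tiny per-file reservation bitmap (everything below is invented for illustration and is not the kernel's reservation-map machinery):

#include <stdio.h>

#define MODEL_FILE_PAGES 8

/* Toy reservation map: reserved[i] != 0 means huge-page index i of
 * the backing file already has a page reserved for it. */
static int reserved[MODEL_FILE_PAGES] = { 1, 1, 0, 0, 0, 0, 0, 0 };
static long free_huge_pages = 4;

/* "needs reservation": 0 if the index is already covered, 1 if the
 * fault would have to add a new entry. */
static long model_needs_reservation(unsigned long idx)
{
        return reserved[idx] ? 0 : 1;
}

/* Allocation step: taking a page always consumes a free page; the
 * real dequeue would also weigh the charge against the subpool. */
static int model_dequeue_page(void)
{
        if (free_huge_pages == 0)
                return -1;
        free_huge_pages--;
        return 0;
}

/* Commit step: only reached on success, so a failed allocation
 * leaves the reservation map exactly as it was. */
static void model_commit_reservation(unsigned long idx)
{
        reserved[idx] = 1;
}

int main(void)
{
        unsigned long idx = 3;
        long chg = model_needs_reservation(idx);

        if (model_dequeue_page() == 0)
                model_commit_reservation(idx);

        printf("idx=%lu chg=%ld free=%ld reserved=%d\n",
               idx, chg, free_huge_pages, reserved[idx]);
        return 0;
}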
2433 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2435 struct resv_map *resv = vma_resv_map(vma);
2445 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2449 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2451 struct hstate *h = hstate_vma(vma);
2452 struct resv_map *resv = vma_resv_map(vma);
2453 struct hugepage_subpool *spool = subpool_vma(vma);
2456 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2459 start = vma_hugecache_offset(h, vma, vma->vm_start);
2460 end = vma_hugecache_offset(h, vma, vma->vm_end);
2478 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2490 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2497 vma->vm_page_prot)));
2500 vma->vm_page_prot));
2504 entry = arch_make_huge_pte(entry, vma, page, writable);
2509 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2515 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2516 update_mmu_cache(vma, address, ptep);
2546 struct vm_area_struct *vma)
2552 struct hstate *h = hstate_vma(vma);
2558 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2560 mmun_start = vma->vm_start;
2561 mmun_end = vma->vm_end;
2565 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2619 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2624 struct mm_struct *mm = vma->vm_mm;
2630 struct hstate *h = hstate_vma(vma);
2635 WARN_ON(!is_vm_hugetlb_page(vma));
2639 tlb_start_vma(tlb, vma);
2678 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2712 tlb_end_vma(tlb, vma);
2716 struct vm_area_struct *vma, unsigned long start,
2719 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2723 * test will fail on a vma being torn down, and not grab a page table
2731 vma->vm_flags &= ~VM_MAYSHARE;
2734 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2740 mm = vma->vm_mm;
2743 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2753 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2756 struct hstate *h = hstate_vma(vma);
2766 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2767 vma->vm_pgoff;
2768 mapping = file_inode(vma->vm_file)->i_mapping;
2778 if (iter_vma == vma)
2801 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2805 struct hstate *h = hstate_vma(vma);
2817 page_move_anon_rmap(old_page, vma, address);
2818 set_huge_ptep_writable(vma, address, ptep);
2831 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2842 new_page = alloc_huge_page(vma, address, outside_reserve);
2855 unmap_ref_private(mm, vma, old_page, address);
2878 if (unlikely(anon_vma_prepare(vma))) {
2883 copy_user_huge_page(new_page, old_page, address, vma,
2901 huge_ptep_clear_flush(vma, address, ptep);
2903 make_huge_pte(vma, new_page, 1));
2905 hugepage_add_new_anon_rmap(new_page, vma, address);
2922 struct vm_area_struct *vma, unsigned long address)
2927 mapping = vma->vm_file->f_mapping;
2928 idx = vma_hugecache_offset(h, vma, address);
2938 struct vm_area_struct *vma, unsigned long address)
2944 mapping = vma->vm_file->f_mapping;
2945 idx = vma_hugecache_offset(h, vma, address);
2953 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
2957 struct hstate *h = hstate_vma(vma);
2970 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
2986 page = alloc_huge_page(vma, address, 0);
2998 if (vma->vm_flags & VM_MAYSHARE) {
3016 if (unlikely(anon_vma_prepare(vma))) {
3041 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
3042 if (vma_needs_reservation(h, vma, address) < 0) {
3059 hugepage_add_new_anon_rmap(page, vma, address);
3062 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3063 && (vma->vm_flags & VM_SHARED)));
3066 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3068 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3086 struct vm_area_struct *vma,
3093 if (vma->vm_flags & VM_SHARED) {
3111 struct vm_area_struct *vma,
3119 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3129 struct hstate *h = hstate_vma(vma);
3138 migration_entry_wait_huge(vma, mm, ptep);
3149 mapping = vma->vm_file->f_mapping;
3150 idx = vma_hugecache_offset(h, vma, address);
3157 hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3162 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3177 if (vma_needs_reservation(h, vma, address) < 0) {
3182 if (!(vma->vm_flags & VM_MAYSHARE))
3184 vma, address);
3208 ret = hugetlb_cow(mm, vma, address, ptep, entry,
3215 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3217 update_mmu_cache(vma, address, ptep);
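hugetlb_fault() (source lines 3119 onward) serializes racing faults on the same backing-file index by hashing (mapping, idx) to one of a table of mutexes via fault_mutex_hash(). The excerpt does not show the hash itself, so the sketch below is only a generic stand-in for that per-(mapping, index) serialization idea, with an invented table size and mixing constant:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_NUM_FAULT_MUTEXES 64              /* invented table size */

static pthread_mutex_t fault_mutex_table[MODEL_NUM_FAULT_MUTEXES];

/* Hash the (mapping, index) pair so that concurrent faults on the
 * same huge page serialize on the same mutex; the mixing below is
 * arbitrary, only the per-(mapping, idx) locking idea is taken from
 * the excerpt. */
static unsigned int model_fault_mutex_hash(const void *mapping, unsigned long idx)
{
        uint64_t key = (uint64_t)(uintptr_t)mapping ^ (idx * 0x9e3779b97f4a7c15ULL);

        return (unsigned int)(key % MODEL_NUM_FAULT_MUTEXES);
}

int main(void)
{
        static int fake_mapping;                /* stands in for an address_space */
        unsigned int hash;
        int i;

        for (i = 0; i < MODEL_NUM_FAULT_MUTEXES; i++)
                pthread_mutex_init(&fault_mutex_table[i], NULL);

        hash = model_fault_mutex_hash(&fake_mapping, 4);
        pthread_mutex_lock(&fault_mutex_table[hash]);
        /* ... hugetlb_no_page()/CoW handling for this index ... */
        pthread_mutex_unlock(&fault_mutex_table[hash]);

        printf("index 4 hashed to mutex %u\n", hash);
        return 0;
}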
3235 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3243 struct hstate *h = hstate_vma(vma);
3245 while (vaddr < vma->vm_end && remainder) {
3271 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3295 ret = hugetlb_fault(mm, vma, vaddr,
3313 vmas[i] = vma;
3319 if (vaddr < vma->vm_end && remainder &&
3335 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3338 struct mm_struct *mm = vma->vm_mm;
3342 struct hstate *h = hstate_vma(vma);
3346 flush_cache_range(vma, address, end);
3349 mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
3364 pte = arch_make_huge_pte(pte, vma, NULL, 0);
3376 flush_tlb_range(vma, start, end);
3377 mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
3385 struct vm_area_struct *vma,
3405 * called to make the mapping read-write. Assume !vma is a shm mapping
3407 if (!vma || vma->vm_flags & VM_MAYSHARE) {
3419 set_vma_resv_map(vma, resv_map);
3420 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3455 if (!vma || vma->vm_flags & VM_MAYSHARE)
3459 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3483 struct vm_area_struct *vma,
3492 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3507 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3515 if (vma->vm_flags & VM_MAYSHARE &&
3516 vma->vm_start <= base && end <= vma->vm_end)
3532 struct vm_area_struct *vma = find_vma(mm, addr);
3533 struct address_space *mapping = vma->vm_file->f_mapping;
3534 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3535 vma->vm_pgoff;
3542 if (!vma_shareable(vma, addr))
3547 if (svma == vma)
3550 saddr = page_table_shareable(svma, vma, addr, idx);
3563 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
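The final group (page_table_shareable(), vma_shareable(), huge_pmd_share()) gates PMD page-table sharing: a shared mapping may reuse another process's PMD page only if the PUD_SIZE-aligned window around the faulting address lies entirely inside the vma and the relevant flags (with VM_LOCKED masked off, per source line 3492) agree. A standalone model of the containment check at source lines 3515-3516, assuming x86-64's 1 GiB PUD_SIZE and a 64-bit build:

#include <stdio.h>

#define MODEL_PUD_SIZE  (1UL << 30)     /* 1 GiB, x86-64 assumption */
#define MODEL_PUD_MASK  (~(MODEL_PUD_SIZE - 1))

/* Sharing a PMD page table is only safe when the whole PUD-sized,
 * PUD-aligned window around addr is covered by this shared vma. */
static int model_vma_shareable(unsigned long vm_start, unsigned long vm_end,
                               int may_share, unsigned long addr)
{
        unsigned long base = addr & MODEL_PUD_MASK;
        unsigned long end = base + MODEL_PUD_SIZE;

        return may_share && vm_start <= base && end <= vm_end;
}

int main(void)
{
        unsigned long start = 2UL << 30, len = 3UL << 30;   /* 3 GiB mapping at 2 GiB */

        /* an address whose 1 GiB window is fully covered: shareable */
        printf("%d\n", model_vma_shareable(start, start + len, 1,
                                           start + (1UL << 30) + 4096));
        /* mapping trimmed so the window is only partially covered: not shareable */
        printf("%d\n", model_vma_shareable(start + 4096, start + len, 1,
                                           start + 4096));
        return 0;
}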