Searched refs:vma (Results 1 - 25 of 33) sorted by relevance

/mm/

mmap.c
61 struct vm_area_struct *vma, struct vm_area_struct *prev,
97 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
98 void vma_set_page_prot(struct vm_area_struct *vma) argument
100 unsigned long vm_flags = vma->vm_flags;
102 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
103 if (vma_wants_writenotify(vma)) {
105 vma->vm_page_prot = vm_pgprot_modify(vma
237 __remove_shared_vm_struct(struct vm_area_struct *vma, struct file *file, struct address_space *mapping) argument
257 unlink_file_vma(struct vm_area_struct *vma) argument
272 remove_vma(struct vm_area_struct *vma) argument
358 vma_compute_subtree_gap(struct vm_area_struct *vma) argument
387 struct vm_area_struct *vma; local
430 struct vm_area_struct *vma; local
443 struct vm_area_struct *vma = mm->mmap; local
486 vma_gap_update(struct vm_area_struct *vma) argument
495 vma_rb_insert(struct vm_area_struct *vma, struct rb_root *root) argument
504 vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) argument
535 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) argument
544 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) argument
590 struct vm_area_struct *vma; local
614 __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, struct rb_node **rb_link, struct rb_node *rb_parent) argument
638 __vma_link_file(struct vm_area_struct *vma) argument
661 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) argument
669 vma_link(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node **rb_link, struct rb_node *rb_parent) argument
694 __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) argument
707 __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev) argument
728 vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert) argument
930 is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags, const char __user *anon_name) argument
953 is_mergeable_anon_vma(struct anon_vma *anon_vma1, struct anon_vma *anon_vma2, struct vm_area_struct *vma) argument
979 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, const char __user *anon_name) argument
999 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, const char __user *anon_name) argument
1181 find_mergeable_anon_vma(struct vm_area_struct *vma) argument
1494 vma_wants_writenotify(struct vm_area_struct *vma) argument
1545 struct vm_area_struct *vma, *prev; local
1720 struct vm_area_struct *vma; local
1814 struct vm_area_struct *vma; local
1926 struct vm_area_struct *vma; local
1962 struct vm_area_struct *vma; local
2046 struct vm_area_struct *vma; local
2084 struct vm_area_struct *vma; local
2105 acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow) argument
2155 expand_upwards(struct vm_area_struct *vma, unsigned long address) argument
2230 expand_downwards(struct vm_area_struct *vma, unsigned long address) argument
2307 expand_stack(struct vm_area_struct *vma, unsigned long address) argument
2323 struct vm_area_struct *vma, *prev; local
2336 expand_stack(struct vm_area_struct *vma, unsigned long address) argument
2352 struct vm_area_struct *vma; local
2378 remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) argument
2401 unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end) argument
2422 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long end) argument
2452 __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
2520 split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
2537 struct vm_area_struct *vma, *prev, *last; local
2653 struct vm_area_struct *vma, *prev; local
2751 struct vm_area_struct *vma; local
2801 insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) argument
2841 struct vm_area_struct *vma = *vmap; local
2935 special_mapping_close(struct vm_area_struct *vma) argument
2939 special_mapping_name(struct vm_area_struct *vma) argument
2955 special_mapping_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
2995 struct vm_area_struct *vma; local
3049 struct vm_area_struct *vma = __install_special_mapping( local
3133 struct vm_area_struct *vma; local
3204 struct vm_area_struct *vma; local
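
The vma_set_page_prot() lines above (mmap.c 97-105) derive the protection actually installed in page tables from the vma's vm_flags, adjusting it again when vma_wants_writenotify() asks for write faults to be taken (dirty tracking). A minimal userspace sketch of that flags-to-protection step, using made-up flag and protection constants rather than the kernel's vm_pgprot_modify() helpers:

#include <stdio.h>

/* Simplified stand-ins for vm_flags bits (not the kernel's values). */
#define VM_READ        0x1
#define VM_WRITE       0x2
#define VM_EXEC        0x4
#define VM_WRITENOTIFY 0x8    /* hypothetical "wants writenotify" condition */

/* Simplified stand-ins for page-protection bits. */
#define PROT_R 0x1
#define PROT_W 0x2
#define PROT_X 0x4

/* Derive the protection installed in PTEs from the vma flags, mirroring
 * the shape of vma_set_page_prot(): start from the flags, then strip
 * write permission when write-notification is requested so the first
 * store faults and can be tracked. */
static unsigned int set_page_prot(unsigned int vm_flags)
{
    unsigned int prot = 0;

    if (vm_flags & VM_READ)  prot |= PROT_R;
    if (vm_flags & VM_WRITE) prot |= PROT_W;
    if (vm_flags & VM_EXEC)  prot |= PROT_X;

    if (vm_flags & VM_WRITENOTIFY)
        prot &= ~PROT_W;      /* force a fault on first write */

    return prot;
}

int main(void)
{
    printf("rw  mapping -> prot %#x\n", set_page_prot(VM_READ | VM_WRITE));
    printf("rw + notify -> prot %#x\n",
           set_page_prot(VM_READ | VM_WRITE | VM_WRITENOTIFY));
    return 0;
}

Clearing the write bit is the whole point of write notification: the first store into such a mapping traps, giving the kernel a chance to mark the page dirty before write access is restored.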

madvise.c
24 * Any behaviour which results in changes to the vma->vm_flags needs to
45 static long madvise_behavior(struct vm_area_struct *vma, argument
49 struct mm_struct *mm = vma->vm_mm;
52 unsigned long new_flags = vma->vm_flags;
68 if (vma->vm_flags & VM_IO) {
86 error = ksm_madvise(vma, start, end, behavior, &new_flags);
92 error = hugepage_madvise(vma, &new_flags, behavior);
98 if (new_flags == vma->vm_flags) {
99 *prev = vma;
103 pgoff = vma
143 struct vm_area_struct *vma = walk->private; local
174 force_swapin_readahead(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
188 force_shm_swapin_readahead(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping) argument
219 madvise_willneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
274 madvise_dontneed(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
297 madvise_remove(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end) argument
377 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, int behavior) argument
466 struct vm_area_struct *vma, *prev; local
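
madvise_behavior() above computes a new flag word for the region from the requested behaviour and returns early when nothing changes (madvise.c 98-99). A simplified sketch of that dispatch; the flag and advice names mirror the kernel's (VM_SEQ_READ, VM_RAND_READ, VM_DONTCOPY) but the bit values here are arbitrary, and the real function additionally splits or merges vmas and defers to ksm_madvise()/hugepage_madvise():

#include <stdio.h>

/* Invented stand-in values for the flag bits involved. */
#define VM_SEQ_READ   0x1
#define VM_RAND_READ  0x2
#define VM_DONTCOPY   0x4

enum advice { ADV_NORMAL, ADV_SEQUENTIAL, ADV_RANDOM, ADV_DONTFORK, ADV_DOFORK };

/* Compute the new flag word for a region given one piece of advice,
 * in the spirit of madvise_behavior(): each advice toggles a small set
 * of bits; identical flags mean there is nothing to do. */
static unsigned int apply_advice(unsigned int flags, enum advice adv)
{
    switch (adv) {
    case ADV_NORMAL:
        return flags & ~(VM_SEQ_READ | VM_RAND_READ);
    case ADV_SEQUENTIAL:
        return (flags & ~VM_RAND_READ) | VM_SEQ_READ;
    case ADV_RANDOM:
        return (flags & ~VM_SEQ_READ) | VM_RAND_READ;
    case ADV_DONTFORK:
        return flags | VM_DONTCOPY;
    case ADV_DOFORK:
        return flags & ~VM_DONTCOPY;
    }
    return flags;
}

int main(void)
{
    unsigned int flags = apply_advice(0, ADV_SEQUENTIAL);
    printf("after SEQUENTIAL: %#x\n", flags);

    if (apply_advice(flags, ADV_SEQUENTIAL) == flags)
        printf("advice repeated: flags unchanged, nothing to do\n");
    return 0;
}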

mprotect.c
39 static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd, argument
47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
49 pmdl = pmd_lock(vma->vm_mm, pmd);
55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
60 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, argument
64 struct mm_struct *mm = vma->vm_mm;
69 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
91 !(vma->vm_flags & VM_SOFTDIRTY)))
98 page = vm_normal_page(vma, addr, oldpte);
133 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, argument
190 change_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
210 change_protection_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
240 change_protection(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
255 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags) argument
339 struct vm_area_struct *vma, *prev; local

pgtable-generic.c
47 int ptep_set_access_flags(struct vm_area_struct *vma, argument
53 set_pte_at(vma->vm_mm, address, ptep, entry);
54 flush_tlb_fix_spurious_fault(vma, address);
61 int pmdp_set_access_flags(struct vm_area_struct *vma, argument
69 set_pmd_at(vma->vm_mm, address, pmdp, entry);
70 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
81 int ptep_clear_flush_young(struct vm_area_struct *vma, argument
85 young = ptep_test_and_clear_young(vma, address, ptep);
87 flush_tlb_page(vma, address);
93 int pmdp_clear_flush_young(struct vm_area_struct *vma, argument
110 ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
124 pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) argument
138 pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) argument
192 pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) argument

msync.c
35 struct vm_area_struct *vma; local
58 vma = find_vma(mm, start);
65 if (!vma)
67 /* Here start < vma->vm_end. */
68 if (start < vma->vm_start) {
69 start = vma->vm_start;
74 /* Here vma->vm_start <= start < vma->vm_end. */
76 (vma->vm_flags & VM_LOCKED)) {
80 file = vma
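
The msync loop above (msync.c 58-76) visits every vma overlapping [start, end) with find_vma(), relying on the invariant that the returned vma is the first one whose vm_end lies above start; the range may begin in a hole, in which case start is advanced to vm_start. A small sketch of the same loop over a toy, sorted vma array (hypothetical types, not the kernel API):

#include <stdio.h>
#include <stddef.h>

/* A toy vma: half-open range [vm_start, vm_end). */
struct toy_vma { unsigned long vm_start, vm_end; };

/* find_vma()-style lookup: first vma whose vm_end is above addr,
 * or NULL if addr lies beyond every mapping. */
static struct toy_vma *find_vma(struct toy_vma *v, size_t n, unsigned long addr)
{
    for (size_t i = 0; i < n; i++)
        if (v[i].vm_end > addr)
            return &v[i];
    return NULL;
}

int main(void)
{
    struct toy_vma map[] = { {0x1000, 0x3000}, {0x5000, 0x8000}, {0x9000, 0xa000} };
    unsigned long start = 0x2000, end = 0x9800;

    /* Visit each vma that overlaps [start, end), skipping holes, the
     * same shape as the msync()/mlock() loops in mm/. */
    while (start < end) {
        struct toy_vma *vma = find_vma(map, 3, start);
        if (!vma || vma->vm_start >= end)
            break;                       /* rest of the range is unmapped */
        if (start < vma->vm_start)
            start = vma->vm_start;       /* skip the hole before this vma */
        unsigned long stop = vma->vm_end < end ? vma->vm_end : end;
        printf("sync [%#lx, %#lx)\n", start, stop);
        start = vma->vm_end;             /* continue after this vma */
    }
    return 0;
}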

fremap.c
31 static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, argument
39 flush_cache_page(vma, addr, pte_pfn(pte));
40 pte = ptep_clear_flush(vma, addr, ptep);
41 page = vm_normal_page(vma, addr, pte);
72 static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, argument
86 zap_pte(mm, vma, addr, pte);
93 * be mapped there when there's a fault (in a non-linear vma where
102 int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr, argument
105 struct mm_struct *mm = vma->vm_mm;
109 err = install_file_pte(mm, vma, add
147 struct vm_area_struct *vma; local

vmacache.c
9 * Flush vma caches for threads that share a given mm.
12 * exclusively and other threads accessing the vma cache will
14 * is required to maintain the vma cache.
94 struct vm_area_struct *vma = current->vmacache[i]; local
96 if (!vma)
98 if (WARN_ON_ONCE(vma->vm_mm != mm))
100 if (vma->vm_start <= addr && vma->vm_end > addr) {
102 return vma;
122 struct vm_area_struct *vma local
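
The vmacache lookup above (vmacache.c 94-102) probes a handful of per-thread slots for a vma with vm_start <= addr < vm_end before falling back to the full mm lookup. A compact sketch of that idea; the slot choice here is a trivial modulo and there is no invalidation or sequence-count check, both of which the kernel needs:

#include <stdio.h>
#include <stddef.h>

#define VMACACHE_SIZE 4   /* the kernel also uses a small fixed slot count */

struct toy_vma { unsigned long vm_start, vm_end; };

/* Per-thread cache of recently used vmas (simplified: no mm sequence
 * counter, so no invalidation story here). */
static struct toy_vma *vmacache[VMACACHE_SIZE];

/* Stand-in for the slow path (the rb-tree walk in the kernel). */
static struct toy_vma *slow_lookup(unsigned long addr);

static struct toy_vma *vmacache_find(unsigned long addr)
{
    for (int i = 0; i < VMACACHE_SIZE; i++) {
        struct toy_vma *vma = vmacache[i];

        if (vma && vma->vm_start <= addr && vma->vm_end > addr)
            return vma;                       /* cache hit */
    }

    struct toy_vma *vma = slow_lookup(addr);  /* miss: do the full walk */
    if (vma)
        vmacache[addr % VMACACHE_SIZE] = vma; /* remember it; the kernel
                                                 hashes the page number */
    return vma;
}

/* Trivial slow path over a static table, just to make the sketch run. */
static struct toy_vma table[] = { {0x1000, 0x2000}, {0x4000, 0x6000} };

static struct toy_vma *slow_lookup(unsigned long addr)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
        if (table[i].vm_start <= addr && table[i].vm_end > addr)
            return &table[i];
    return NULL;
}

int main(void)
{
    printf("0x4800 -> %p (miss, filled)\n", (void *)vmacache_find(0x4800));
    printf("0x4800 -> %p (hit)\n",          (void *)vmacache_find(0x4800));
    return 0;
}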

mremap.c
52 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, argument
90 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, argument
97 struct mm_struct *mm = vma->vm_mm;
110 * - During exec() shift_arg_pages(), we use a specially tagged vma
113 * - During mremap(), new_vma is often known to be placed after vma
120 if (vma->vm_file) {
121 mapping = vma->vm_file->f_mapping;
124 if (vma->anon_vma) {
125 anon_vma = vma->anon_vma;
164 unsigned long move_page_tables(struct vm_area_struct *vma, argument
237 move_vma(struct vm_area_struct *vma, unsigned long old_addr, unsigned long old_len, unsigned long new_len, unsigned long new_addr, bool *locked) argument
340 struct vm_area_struct *vma = find_vma(mm, addr); local
399 struct vm_area_struct *vma; local
456 vma_expandable(struct vm_area_struct *vma, unsigned long delta) argument
481 struct vm_area_struct *vma; local

nommu.c
135 struct vm_area_struct *vma; local
137 vma = find_vma(current->mm, (unsigned long)objp);
138 if (vma)
139 return vma->vm_end - vma->vm_start;
154 struct vm_area_struct *vma; local
167 vma = find_vma(mm, start);
168 if (!vma)
172 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
173 !(vm_flags & vma
226 follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn) argument
262 struct vm_area_struct *vma; local
496 vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) argument
687 protect_vma(struct vm_area_struct *vma, unsigned long flags) argument
706 add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) argument
774 delete_vma_from_mm(struct vm_area_struct *vma) argument
820 delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) argument
837 struct vm_area_struct *vma; local
872 expand_stack(struct vm_area_struct *vma, unsigned long address) argument
885 struct vm_area_struct *vma; local
1126 do_mmap_shared_file(struct vm_area_struct *vma) argument
1147 do_mmap_private(struct vm_area_struct *vma, struct vm_region *region, unsigned long len, unsigned long capabilities) argument
1269 struct vm_area_struct *vma; local
1546 split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
1612 shrink_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long from, unsigned long to) argument
1655 struct vm_area_struct *vma; local
1751 struct vm_area_struct *vma; local
1789 struct vm_area_struct *vma; local
1833 follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask) argument
1841 remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) argument
1852 vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) argument
1862 remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) argument
1984 filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
1991 filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf) argument
1997 generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr, unsigned long size, pgoff_t pgoff) argument
2008 struct vm_area_struct *vma; local
2088 struct vm_area_struct *vma; local

mincore.c
22 static void mincore_hugetlb_page_range(struct vm_area_struct *vma, argument
29 h = hstate_vma(vma);
97 static void mincore_unmapped_range(struct vm_area_struct *vma, argument
104 if (vma->vm_file) {
107 pgoff = linear_page_index(vma, addr);
109 vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
116 static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd, argument
124 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
131 mincore_unmapped_range(vma, addr, next, vec);
136 *vec = mincore_page(vma
159 mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, unsigned char *vec) argument
184 mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned char *vec) argument
202 mincore_page_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec) argument
227 struct vm_area_struct *vma; local

rmap.c
125 static void anon_vma_chain_link(struct vm_area_struct *vma, argument
129 avc->vma = vma;
131 list_add(&avc->same_vma, &vma->anon_vma_chain);
137 * @vma: the memory region in question
139 * This makes sure the memory mapping described by 'vma' has
146 * reason for splitting a vma has been mprotect()), or we
149 * Anon-vma allocations are very subtle, because we may have
152 * allocated vma (it depends on RCU to make sure that the
162 int anon_vma_prepare(struct vm_area_struct *vma) argument
273 anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) argument
325 unlink_anon_vmas(struct vm_area_struct *vma) argument
520 __vma_address(struct page *page, struct vm_area_struct *vma) argument
527 vma_address(struct page *page, struct vm_area_struct *vma) argument
541 page_address_in_vma(struct page *page, struct vm_area_struct *vma) argument
650 page_mapped_in_vma(struct page *page, struct vm_area_struct *vma) argument
676 page_referenced_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) argument
749 invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) argument
818 page_mkclean_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) argument
852 invalid_mkclean_vma(struct vm_area_struct *vma, void *arg) argument
896 page_move_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) argument
916 __page_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) argument
945 __page_check_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) argument
977 page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) argument
988 do_page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) argument
1026 page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) argument
1138 try_to_unmap_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg) argument
1305 try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, struct vm_area_struct *vma, struct page *check_page) argument
1412 struct vm_area_struct *vma; local
1483 is_vma_temporary_stack(struct vm_area_struct *vma) argument
1497 invalid_migration_vma(struct vm_area_struct *vma, void *arg) argument
1647 struct vm_area_struct *vma = avc->vma; local
1680 struct vm_area_struct *vma; local
1736 __hugepage_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive) argument
1753 hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) argument
1767 hugepage_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) argument

mlock.c
48 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
50 * the mmap_sem for read, and verify that the vma really is locked
145 * the page back to the unevictable list if some other vma has it mlocked.
156 * munlock_vma_page - munlock a vma page
163 * When we munlock a page, because the vma where we found the page is being
209 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
210 * @vma: target vma
219 * vma->vm_mm->mmap_sem must be held.
227 long __mlock_vma_pages_range(struct vm_area_struct *vma, argument
415 __munlock_pagevec_fill(struct pagevec *pvec, struct vm_area_struct *vma, int zoneid, unsigned long start, unsigned long end) argument
478 munlock_vma_pages_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
554 mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, unsigned long start, unsigned long end, vm_flags_t newflags) argument
616 struct vm_area_struct * vma, * prev; local
675 struct vm_area_struct *vma = NULL; local
775 struct vm_area_struct * vma, * prev = NULL; local

memory.c
528 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma, argument
531 while (vma) {
532 struct vm_area_struct *next = vma->vm_next;
533 unsigned long addr = vma->vm_start;
536 * Hide vma from rmap and truncate_pagecache before freeing
539 unlink_anon_vmas(vma);
540 unlink_file_vma(vma);
542 if (is_vm_hugetlb_page(vma)) {
543 hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
549 while (next && next->vm_start <= vma
563 __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, unsigned long address) argument
646 print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) argument
750 vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) argument
805 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) argument
888 copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
951 copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
984 copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
1006 copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *vma) argument
1075 zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) argument
1222 zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) argument
1266 zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) argument
1285 unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) argument
1309 unmap_single_vma(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details) argument
1370 unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr) argument
1391 zap_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long size, struct zap_details *details) argument
1417 zap_page_range_single(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details) argument
1445 zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size) argument
1478 insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot) argument
1540 vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) argument
1556 insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t prot) argument
1601 vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) argument
1629 vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) argument
1731 remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) argument
1802 vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) argument
1962 cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma) argument
1996 do_page_mkwrite(struct vm_area_struct *vma, struct page *page, unsigned long address) argument
2307 unmap_mapping_range_vma(struct vm_area_struct *vma, unsigned long start_addr, unsigned long end_addr, struct zap_details *details) argument
2317 struct vm_area_struct *vma; local
2343 struct vm_area_struct *vma; local
2411 do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) argument
2594 check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) argument
2628 do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags) argument
2704 __do_fault(struct vm_area_struct *vma, unsigned long address, pgoff_t pgoff, unsigned int flags, struct page **page) argument
2750 do_set_pte(struct vm_area_struct *vma, unsigned long address, struct page *page, pte_t *pte, bool write, bool anon) argument
2838 do_fault_around(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pgoff_t pgoff, unsigned int flags) argument
2881 do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte) argument
2922 do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte) argument
2971 do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte) argument
3035 do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) argument
3063 do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte) argument
3092 numa_migrate_prep(struct page *page, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags) argument
3107 do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) argument
3198 handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags) argument
3260 __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) argument
3346 handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) argument
3495 follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn) argument
3515 follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys) argument
3543 generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) argument
3573 struct vm_area_struct *vma; local
3673 struct vm_area_struct *vma; local
3762 copy_user_gigantic_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) argument
3781 copy_user_huge_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) argument

gup.c
19 static struct page *no_page_table(struct vm_area_struct *vma, argument
30 if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
35 static struct page *follow_page_pte(struct vm_area_struct *vma, argument
38 struct mm_struct *mm = vma->vm_mm;
45 return no_page_table(vma, flags);
74 page = vm_normal_page(vma, address, pte);
95 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
127 return no_page_table(vma, flags);
132 * @vma
143 follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask) argument
221 get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) argument
270 faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking) argument
331 check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) argument
432 struct vm_area_struct *vma = NULL; local
556 struct vm_area_struct *vma; local
672 struct vm_area_struct *vma; local

huge_memory.c
63 * it would have happened if the vma was large enough during page
698 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) argument
700 if (likely(vma->vm_flags & VM_WRITE))
714 struct vm_area_struct *vma,
749 entry = mk_huge_pmd(page, vma->vm_page_prot);
750 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
751 page_add_new_anon_rmap(page, vma, haddr);
753 lru_cache_add_active_or_unevictable(page, vma);
770 struct vm_area_struct *vma,
775 HPAGE_PMD_ORDER, vma, hadd
713 __do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *page) argument
769 alloc_hugepage_vma(int defrag, struct vm_area_struct *vma, unsigned long haddr, int nd, gfp_t extra_gfp) argument
779 set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, struct page *zero_page) argument
795 do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags) argument
849 copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, struct vm_area_struct *vma) argument
924 huge_pmd_set_accessed(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, int dirty) argument
978 do_huge_pmd_wp_page_fallback(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, struct page *page, unsigned long haddr) argument
1087 do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd) argument
1208 follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags) argument
1264 do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, pmd_t *pmdp) argument
1387 zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr) argument
1426 mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec) argument
1446 move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, unsigned long old_addr, unsigned long new_addr, unsigned long old_end, pmd_t *old_pmd, pmd_t *new_pmd) argument
1504 change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, int prot_numa) argument
1550 __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, spinlock_t **ptl) argument
1623 __split_huge_page_splitting(struct page *page, struct vm_area_struct *vma, unsigned long address) argument
1775 __split_huge_page_map(struct page *page, struct vm_area_struct *vma, unsigned long address) argument
1865 struct vm_area_struct *vma = avc->vma; local
1890 struct vm_area_struct *vma = avc->vma; local
1947 hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) argument
2074 khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags) argument
2144 __collapse_huge_page_isolate(struct vm_area_struct *vma, unsigned long address, pte_t *pte) argument
2206 __collapse_huge_page_copy(pte_t *pte, struct page *page, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl) argument
2321 khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int node) argument
2390 khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int node) argument
2400 hugepage_vma_check(struct vm_area_struct *vma) argument
2414 collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, int node) argument
2545 khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, struct page **hpage) argument
2643 struct vm_area_struct *vma; variable in typeref:struct:vm_area_struct
2829 __split_huge_zero_page_pmd(struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd) argument
2857 __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) argument
2907 struct vm_area_struct *vma; local
2941 __vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, long adjust_next) argument

mempolicy.c
444 * Rebind each vma in mm to new nodemask.
451 struct vm_area_struct *vma; local
454 for (vma = mm->mmap; vma; vma = vma->vm_next)
455 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
484 static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd, argument
493 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
500 page = vm_normal_page(vma, add
522 queue_pages_hugetlb_pmd_range(struct vm_area_struct *vma, pmd_t *pmd, const nodemask_t *nodes, unsigned long flags, void *private) argument
551 queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
579 queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
601 queue_pages_pgd_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
631 change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
643 change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
662 struct vm_area_struct *vma, *prev; local
709 vma_replace_policy(struct vm_area_struct *vma, struct mempolicy *pol) argument
747 struct vm_area_struct *vma; local
885 struct vm_area_struct *vma = NULL; local
1150 struct vm_area_struct *vma; local
1595 __get_vma_policy(struct vm_area_struct *vma, unsigned long addr) argument
1632 get_vma_policy(struct vm_area_struct *vma, unsigned long addr) argument
1643 vma_policy_mof(struct vm_area_struct *vma) argument
1788 offset_il_node(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long off) argument
1808 interleave_nid(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long addr, int shift) argument
1860 huge_zonelist(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) argument
2013 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, int node) argument
2274 mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) argument
2495 mpol_set_shared_policy(struct shared_policy *info, struct vm_area_struct *vma, struct mempolicy *npol) argument

debug.c
152 void dump_vma(const struct vm_area_struct *vma) argument
154 pr_emerg("vma %p start %p end %p\n"
158 vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next,
159 vma->vm_prev, vma->vm_mm,
160 (unsigned long)pgprot_val(vma->vm_page_prot),
161 vma

pagewalk.c
108 static int walk_hugetlb_range(struct vm_area_struct *vma, argument
112 struct hstate *h = hstate_vma(vma);
131 static int walk_hugetlb_range(struct vm_area_struct *vma, argument
184 struct vm_area_struct *vma = NULL; local
189 * This function was not intended to be vma based.
190 * But there are vma special cases to be handled:
191 * - hugetlb vma's
192 * - VM_PFNMAP vma's
194 vma = find_vma(walk->mm, addr);
195 if (vma) {

hugetlb.c
137 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) argument
139 return subpool_inode(file_inode(vma->vm_file));
323 * Convert the address within this vma to the page offset within
327 struct vm_area_struct *vma, unsigned long address)
329 return ((address - vma->vm_start) >> huge_page_shift(h)) +
330 (vma->vm_pgoff >> huge_page_order(h));
333 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, argument
336 return vma_hugecache_offset(hstate_vma(vma), vma, address);
343 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) argument
326 vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address) argument
363 vma_mmu_pagesize(struct vm_area_struct *vma) argument
397 get_vma_private_data(struct vm_area_struct *vma) argument
402 set_vma_private_data(struct vm_area_struct *vma, unsigned long value) argument
435 vma_resv_map(struct vm_area_struct *vma) argument
450 set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) argument
459 set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) argument
467 is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) argument
475 reset_vma_resv_huge_pages(struct vm_area_struct *vma) argument
483 vma_has_reserves(struct vm_area_struct *vma, long chg) argument
552 dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, long chg) argument
1331 vma_needs_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
1350 vma_commit_reservation(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) argument
1364 alloc_huge_page(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) argument
1427 alloc_huge_page_noerr(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) argument
2433 hugetlb_vm_op_open(struct vm_area_struct *vma) argument
2449 hugetlb_vm_op_close(struct vm_area_struct *vma) argument
2478 hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
2490 make_huge_pte(struct vm_area_struct *vma, struct page *page, int writable) argument
2509 set_huge_ptep_writable(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) argument
2545 copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma) argument
2619 __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) argument
2715 __unmap_hugepage_range_final(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) argument
2734 unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct page *ref_page) argument
2753 unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, unsigned long address) argument
2801 hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t pte, struct page *pagecache_page, spinlock_t *ptl) argument
2921 hugetlbfs_pagecache_page(struct hstate *h, struct vm_area_struct *vma, unsigned long address) argument
2937 hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address) argument
2953 hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address, pte_t *ptep, unsigned int flags) argument
3085 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) argument
3110 fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address) argument
3119 hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags) argument
3235 follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, struct vm_area_struct **vmas, unsigned long *position, unsigned long *nr_pages, long i, unsigned int flags) argument
3335 hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot) argument
3383 hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) argument
3482 page_table_shareable(struct vm_area_struct *svma, struct vm_area_struct *vma, unsigned long addr, pgoff_t idx) argument
3507 vma_shareable(struct vm_area_struct *vma, unsigned long addr) argument
3532 struct vm_area_struct *vma = find_vma(mm, addr); local
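
vma_hugecache_offset() above (hugetlb.c 323-330) converts a user address inside a hugetlb vma into a file offset in huge-page units: the distance from vm_start shifted down by the huge page shift, plus vm_pgoff converted from base pages to huge pages. A worked example with assumed 4 KiB base pages, 2 MiB huge pages, and made-up mapping values:

#include <stdio.h>

int main(void)
{
    /* Assumed geometry: 4 KiB base pages, 2 MiB huge pages. */
    unsigned int page_shift = 12;
    unsigned int huge_shift = 21;
    unsigned int huge_order = huge_shift - page_shift;   /* 9 */

    /* A made-up hugetlb mapping: starts at file offset 1024 base pages
     * (2 huge pages into the file) and is mapped at 0x40000000. */
    unsigned long vm_start = 0x40000000UL;
    unsigned long vm_pgoff = 1024;           /* in base-page units */
    unsigned long address  = 0x40600000UL;   /* 6 MiB into the mapping */

    /* Same formula as vma_hugecache_offset():
     * ((address - vm_start) >> huge_page_shift) + (vm_pgoff >> huge_page_order) */
    unsigned long idx = ((address - vm_start) >> huge_shift)
                      + (vm_pgoff >> huge_order);

    printf("huge-page index in file: %lu\n", idx);   /* 3 + 2 = 5 */
    return 0;
}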

ksm.c
358 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
363 static int break_ksm(struct vm_area_struct *vma, unsigned long addr) argument
370 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
374 ret = handle_mm_fault(vma->vm_mm, vma, addr,
385 * COW has been broken, even if the vma does not permit VM_WRITE;
414 struct vm_area_struct *vma; local
417 vma = find_vma(mm, addr);
418 if (!vma || vma
429 struct vm_area_struct *vma; local
462 struct vm_area_struct *vma; local
681 unmerge_ksm_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end) argument
771 struct vm_area_struct *vma; local
855 write_protect_page(struct vm_area_struct *vma, struct page *page, pte_t *orig_pte) argument
929 replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte) argument
1015 try_to_merge_one_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) argument
1086 struct vm_area_struct *vma; local
1539 struct vm_area_struct *vma; local
1738 ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) argument
1862 ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address) argument
1915 struct vm_area_struct *vma; local

filemap_xip.c
168 struct vm_area_struct *vma; local
186 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
187 mm = vma->vm_mm;
188 address = vma->vm_start +
189 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
190 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
194 flush_cache_page(vma, address, pte_pfn(*pte));
195 pteval = ptep_clear_flush(vma, address, pte);
217 * xip_fault() is invoked via the vma operation
222 xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf) argument
312 xip_file_mmap(struct file * file, struct vm_area_struct * vma) argument
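
The unmap loop above (filemap_xip.c 186-190) walks every vma mapping a given file page through the interval tree and recomputes the user address from the file page offset as vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT), the same arithmetic rmap's vma_address() uses. A worked example with an assumed 4 KiB page size and made-up mapping values:

#include <stdio.h>

int main(void)
{
    unsigned int page_shift = 12;            /* assumed 4 KiB pages */

    /* A made-up file mapping: file offset 16 pages, mapped at 0x7f0000200000. */
    unsigned long vm_start = 0x7f0000200000UL;
    unsigned long vm_end   = 0x7f0000300000UL;
    unsigned long vm_pgoff = 16;

    unsigned long pgoff = 40;                /* the file page being unmapped */

    /* Reverse map: file page -> user address inside this vma. */
    unsigned long address = vm_start + ((pgoff - vm_pgoff) << page_shift);

    if (address >= vm_start && address < vm_end)
        printf("page %lu of the file is mapped at %#lx\n", pgoff, address);
    else
        printf("page %lu is outside this vma\n", pgoff);
    return 0;
}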

migrate.c
105 static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, argument
108 struct mm_struct *mm = vma->vm_mm;
118 ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
146 pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
152 pte = maybe_mkwrite(pte, vma);
157 pte = arch_make_huge_pte(pte, vma, new, 0);
165 hugepage_add_anon_rmap(new, vma, addr);
169 page_add_anon_rmap(new, vma, addr);
174 update_mmu_cache(vma, addr, ptep);
183 * mm/fremap.c's remap_file_pages() accepts any range within a single vma t
197 struct vm_area_struct *vma; local
275 migration_entry_wait_huge(struct vm_area_struct *vma, struct mm_struct *mm, pte_t *pte) argument
1226 struct vm_area_struct *vma; local
1395 struct vm_area_struct *vma; local
1539 struct vm_area_struct *vma; local
1712 migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, int node) argument
1768 migrate_misplaced_transhuge_page(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, pmd_t entry, unsigned long address, struct page *page, int node) argument

util.c
141 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, argument
146 vma->vm_prev = prev;
149 prev->vm_next = vma;
151 mm->mmap = vma;
158 vma->vm_next = next;
160 next->vm_prev = vma;
163 /* Check if the vma is being used as a stack by this task */
165 struct vm_area_struct *vma)
167 return (vma->vm_start <= KSTK_ESP(t) && vma
164 vm_is_stack_for_task(struct task_struct *t, struct vm_area_struct *vma) argument
176 task_of_stack(struct task_struct *task, struct vm_area_struct *vma, bool in_group) argument
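
__vma_link_list() above (util.c 141-160) splices a vma into the mm's address-ordered doubly linked list after prev, updating mm->mmap when it becomes the new head. A generic sketch of that splice on toy types; the kernel version also takes the rb-tree parent to locate the successor when prev is NULL:

#include <stdio.h>
#include <stddef.h>

struct toy_vma {
    unsigned long vm_start, vm_end;
    struct toy_vma *vm_next, *vm_prev;
};

struct toy_mm { struct toy_vma *mmap; };   /* head of the sorted vma list */

/* Link vma into the list immediately after prev (or at the head when
 * prev is NULL), the same splice __vma_link_list() performs. */
static void vma_link_list(struct toy_mm *mm, struct toy_vma *vma, struct toy_vma *prev)
{
    struct toy_vma *next;

    vma->vm_prev = prev;
    if (prev) {
        next = prev->vm_next;
        prev->vm_next = vma;
    } else {
        next = mm->mmap;
        mm->mmap = vma;
    }
    vma->vm_next = next;
    if (next)
        next->vm_prev = vma;
}

int main(void)
{
    struct toy_vma a = { 0x1000, 0x2000 }, b = { 0x3000, 0x4000 }, c = { 0x5000, 0x6000 };
    struct toy_mm mm = { 0 };

    vma_link_list(&mm, &c, NULL);   /* list: c */
    vma_link_list(&mm, &a, NULL);   /* list: a, c */
    vma_link_list(&mm, &b, &a);     /* list: a, b, c */

    for (struct toy_vma *v = mm.mmap; v; v = v->vm_next)
        printf("[%#lx, %#lx)\n", v->vm_start, v->vm_end);
    return 0;
}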

internal.h
220 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
224 extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
226 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
228 static inline void munlock_vma_pages_all(struct vm_area_struct *vma) argument
230 munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
234 * must be called with vma's mmap_sem held for read or write, and page locked.
268 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
272 struct vm_area_struct *vma);

interval_tree.c
64 return vma_start_pgoff(avc->vma);
69 return vma_last_pgoff(avc->vma);

Completed in 4557 milliseconds
