mm/ — cross-reference matches for the vma symbol, by file:
msync.c — msync() looks up the first mapping with vma = find_vma(mm, start) and fails if none is found; at that point start < vma->vm_end, so when start < vma->vm_start the start is advanced to vma->vm_start, after which vma->vm_start <= start < vma->vm_end holds; the loop then tests vma->vm_flags & VM_LOCKED and picks up file = vma->vm_file for the flush. (further matches elided)
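As a usage-side illustration (not part of mm/msync.c itself), here is a minimal userspace program that exercises this path; the file name is arbitrary:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = open("msync-demo.dat", O_RDWR | O_CREAT, 0644);
    if (fd < 0 || ftruncate(fd, 4096) != 0) { perror("setup"); return 1; }

    char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (map == MAP_FAILED) { perror("mmap"); return 1; }

    strcpy(map, "hello");
    /* Kernel-side, sys_msync() walks the vmas covering [map, map+4096)
     * as the snippet above shows: find_vma(), clamp start to vm_start,
     * check VM_LOCKED, and flush dirty pages of vma->vm_file. */
    if (msync(map, 4096, MS_SYNC) != 0) { perror("msync"); return 1; }

    munmap(map, 4096);
    close(fd);
    return 0;
}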
debug.c — dump_vma(const struct vm_area_struct *vma) pr_emerg()-dumps the vma: the pointer itself, vm_start and vm_end (cast to pointers), vm_next, vm_prev, vm_mm, and pgprot_val(vma->vm_page_prot). (further matches elided)
pagewalk.c — walk_hugetlb_range(struct vm_area_struct *vma, ...) derives its hstate from hstate_vma(vma) (a stub variant exists when hugetlb is not configured); walk_page_range() was not intended to be vma based, but hugetlb vmas and VM_PFNMAP vmas are special cases, so it calls vma = find_vma(walk->mm, addr) and branches when a vma covers the address. (further matches elided)
vmacache.c — flushes the per-thread vma caches of threads that share a given mm; this is safe because the caller holds the mmap_sem exclusively while other threads accessing the vma cache hold it at least for read, so no extra locking is required to maintain the vma cache. The lookup scans struct vm_area_struct *vma = current->vmacache[i], skips empty slots, WARN_ON_ONCE()s if vma->vm_mm != mm, and returns the vma for which vma->vm_start <= addr && vma->vm_end > addr. (further matches elided)
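The lookup is a tiny linear probe over a fixed per-thread array. A self-contained userspace model of that hit test (the struct vma type and the cache array are stand-ins for the kernel's; the 4-slot size matches the kernel's VMACACHE_SIZE):

#include <stddef.h>
#include <stdio.h>

#define VMACACHE_SIZE 4

struct vma {
    unsigned long vm_start, vm_end;   /* the vma covers [vm_start, vm_end) */
};

/* Per-thread cache of recently used vmas, modeled after
 * current->vmacache[]: probed before any full tree lookup. */
static struct vma *vmacache[VMACACHE_SIZE];

static struct vma *vmacache_find(unsigned long addr)
{
    for (int i = 0; i < VMACACHE_SIZE; i++) {
        struct vma *vma = vmacache[i];
        if (!vma)
            continue;
        /* Same hit test as the kernel's vmacache_find(). */
        if (vma->vm_start <= addr && vma->vm_end > addr)
            return vma;
    }
    return NULL;  /* miss: the caller falls back to the full lookup */
}

int main(void)
{
    struct vma a = { 0x1000, 0x3000 };
    vmacache[0] = &a;
    printf("0x2000 -> %s\n", vmacache_find(0x2000) ? "hit" : "miss");
    printf("0x4000 -> %s\n", vmacache_find(0x4000) ? "hit" : "miss");
    return 0;
}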
fremap.c — zap_pte(mm, vma, addr, ptep) flush_cache_page()s the page, clears the entry with pte = ptep_clear_flush(vma, addr, ptep), and resolves it via page = vm_normal_page(vma, addr, pte); install_file_pte(mm, vma, addr, ...) first zap_pte()s the slot so that a file pte can be mapped there on fault in a non-linear vma; generic_file_remap_pages(vma, addr, ...) takes mm = vma->vm_mm and calls install_file_pte() per page. (further matches elided)
pgtable-generic.c — generic implementations of the vma-taking page-table helpers: ptep_set_access_flags() set_pte_at()s the updated entry and calls flush_tlb_fix_spurious_fault(vma, address); pmdp_set_access_flags() set_pmd_at()s and flushes flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); ptep_clear_flush_young() does young = ptep_test_and_clear_young() and flush_tlb_page()s only when young was set, with pmdp_clear_flush_young() as the pmd analogue; plus ptep_clear_flush(), pmdp_clear_flush(), pmdp_splitting_flush(), and pmdp_invalidate(). (further matches elided)
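The test-and-clear-then-conditionally-flush shape of ptep_clear_flush_young() is easy to model outside the kernel. A toy, self-contained C sketch (the struct pte and the flush counter are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct pte { bool young; };            /* toy pte with just an accessed bit */

static int flushes;                    /* counts simulated TLB flushes */
static void flush_tlb_page(void) { flushes++; }

/* Model of the generic ptep_clear_flush_young(): clear the accessed
 * bit, but pay for a TLB flush only when it was actually set. */
static bool clear_flush_young(struct pte *pte)
{
    bool young = pte->young;
    pte->young = false;
    if (young)
        flush_tlb_page();
    return young;
}

int main(void)
{
    struct pte pte = { .young = true };
    printf("young=%d flushes=%d\n", clear_flush_young(&pte), flushes);
    printf("young=%d flushes=%d\n", clear_flush_young(&pte), flushes);
    return 0;
}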
mincore.c — mincore_hugetlb_page_range(vma, ...) gets its hstate from h = hstate_vma(vma); mincore_unmapped_range(vma, ...) handles holes: for file-backed vmas it computes pgoff = linear_page_index(vma, addr) and fills vec[i] from mincore_page(vma->vm_file->f_mapping, pgoff); mincore_pte_range(vma, pmd, ...) maps the entries with pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl) and falls back to mincore_unmapped_range() where nothing is mapped; mincore_pmd_range(), mincore_pud_range(), and mincore_page_range() walk the remaining levels, and sys_mincore() looks up the vma. (further matches elided)
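These helpers back the mincore(2) syscall, which is easy to try from userspace. A minimal demo, assuming 4 KiB pages and the usual Linux behaviour that only touched anonymous pages are resident:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    size_t len = 8 * page;
    unsigned char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) { perror("mmap"); return 1; }
    memset(buf, 0, 4 * page);          /* fault in the first four pages */

    unsigned char vec[8];              /* one status byte per page */
    if (mincore(buf, len, vec) != 0) { perror("mincore"); return 1; }
    for (int i = 0; i < 8; i++)
        printf("page %d: %s\n", i, (vec[i] & 1) ? "resident" : "not resident");
    munmap(buf, len);
    return 0;
}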
filemap_xip.c — __xip_unmap() iterates vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff), takes mm = vma->vm_mm, computes address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT) with a BUG_ON() that the address lies inside [vm_start, vm_end), then flush_cache_page()s and ptep_clear_flush()es the mapping; xip_file_fault(vma, vmf) is invoked via the vma operations vector, and xip_file_mmap(file, vma) wires it up. (further matches elided)
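The same pgoff-to-address arithmetic appears as vma_address() in mm/rmap.c. A self-contained model of just that computation (struct vma_like and the constants are invented for the sketch; PAGE_SHIFT 12 assumes 4 KiB pages):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumption: 4 KiB pages */

/* Minimal stand-in for the fields the address computation needs. */
struct vma_like {
    uintptr_t vm_start;               /* first user address of the mapping */
    uintptr_t vm_end;                 /* one past the last mapped address */
    uintptr_t vm_pgoff;               /* file offset of vm_start, in pages */
};

/* Mirror of the kernel arithmetic: translate a file page offset into
 * the user virtual address that maps it in this vma. */
static uintptr_t vma_address(const struct vma_like *vma, uintptr_t pgoff)
{
    return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

int main(void)
{
    struct vma_like vma = { 0x10000000, 0x10010000, 5 };
    uintptr_t addr = vma_address(&vma, 7);   /* third page of the mapping */
    printf("pgoff 7 maps at %#lx\n", (unsigned long)addr);
    return 0;
}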
gup.c — no_page_table(vma, flags) special-cases FOLL_DUMP when the vma has no fault handler (!vma->vm_ops || !vma->vm_ops->fault); follow_page_pte(vma, ...) takes mm = vma->vm_mm, falls back to no_page_table(vma, flags) when the table is absent, resolves page = vm_normal_page(vma, address, pte), and marks the page for mlock when both FOLL_MLOCK and vma->vm_flags & VM_LOCKED are set; follow_page_mask(), get_gate_page(), faultin_page(), check_vma_flags(), and the __get_user_pages()/get_user_pages() paths each look up or validate a vma. (further matches elided)
madvise.c — the header comment notes that any behaviour which results in changes to vma->vm_flags needs to take mmap_sem for writing, while the rest only take it for reading; madvise_behavior(vma, ...) starts from new_flags = vma->vm_flags, rejects VM_IO vmas, routes the KSM advices through ksm_madvise(vma, start, end, behavior, &new_flags) and the hugepage advices through hugepage_madvise(vma, &new_flags, behavior), and returns early with *prev = vma when new_flags == vma->vm_flags; the other handlers are force_swapin_readahead(), force_shm_swapin_readahead(), madvise_willneed(), madvise_dontneed(), madvise_remove(), and madvise_vma(), dispatched from the vma/prev walk in sys_madvise(). (further matches elided)
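From userspace this is the madvise(2) syscall; a minimal demo of the MADV_DONTNEED path handled by madvise_dontneed() above (anonymous private pages read back as zeroes afterwards):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 1 << 20;
    char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) { perror("mmap"); return 1; }
    memset(buf, 'x', len);

    /* The range is zapped; the next touch of each page faults in a
     * fresh zero page. */
    if (madvise(buf, len, MADV_DONTNEED) != 0) { perror("madvise"); return 1; }
    printf("after MADV_DONTNEED: first byte = %d\n", buf[0]);   /* 0 */
    munmap(buf, len);
    return 0;
}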
mprotect.c — lock_pte_protection(vma, pmd, addr, ...) returns pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl), taking a transient pmdl = pmd_lock(vma->vm_mm, pmd) first on the NUMA path; change_pte_range(vma, pmd, ...) walks the locked ptes, consulting vma->vm_flags & VM_SOFTDIRTY for the prot_numa case and resolving pages with vm_normal_page(vma, addr, oldpte); change_pmd_range(), change_pud_range(), and change_protection_range() walk the upper levels; change_protection() dispatches, mprotect_fixup(vma, pprev, start, end, newflags) adjusts the vma itself, and sys_mprotect() iterates vma and prev over the affected range. (further matches elided)
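A small userspace demo of the syscall this file implements; after the mprotect() call the vma has been split or merged as needed and its ptes rewritten:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) { perror("mmap"); return 1; }
    strcpy(buf, "writable");

    /* Downgrade to read-only: writing to buf after this would raise
     * SIGSEGV, but reads still work. */
    if (mprotect(buf, 4096, PROT_READ) != 0) { perror("mprotect"); return 1; }
    printf("still readable: %s\n", buf);
    munmap(buf, 4096);
    return 0;
}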
mremap.c — alloc_new_pmd(mm, vma, ...) allocates the destination pmd; move_ptes(vma, old_pmd, ...) takes mm = vma->vm_mm and documents the locking: during exec()'s shift_arg_pages() a specially tagged vma is used, and during mremap() the new_vma is often known to be placed after the old vma, so it takes the mapping lock when vma->vm_file is set (mapping = vma->vm_file->f_mapping) and the anon_vma lock when vma->anon_vma is set; move_page_tables(vma, ...) moves whole ranges, move_vma(vma, old_addr, old_len, new_len, new_addr, ...) relocates the vma itself, vma_expandable(vma, delta) checks for in-place growth, and sys_mremap() starts from vma = find_vma(mm, addr). (further matches elided)
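The userspace entry point is mremap(2). A minimal demo; when the vma cannot grow in place, the kernel moves the page tables rather than copying the data:

#define _GNU_SOURCE                    /* for mremap() */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t old_len = 4096, new_len = 8 * 4096;
    char *buf = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) { perror("mmap"); return 1; }
    strcpy(buf, "survives the move");

    /* With MREMAP_MAYMOVE the kernel may pick a new range; the ptes
     * are migrated, so the contents arrive without a copy. */
    char *nbuf = mremap(buf, old_len, new_len, MREMAP_MAYMOVE);
    if (nbuf == MAP_FAILED) { perror("mremap"); return 1; }
    printf("%p -> %p: %s\n", (void *)buf, (void *)nbuf, nbuf);
    munmap(nbuf, new_len);
    return 0;
}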
swap_state.c — read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr) allocates the new page with new_page = alloc_page_vma(gfp_mask, vma, addr); swapin_readahead() documents @vma as the user vma this address belongs to, requires the caller to hold down_read on vma->vm_mm if vma is not NULL, and reads the surrounding cluster through read_swap_cache_async(entry, gfp_mask, vma, addr).
util.c — __vma_link_list(mm, vma, prev, ...) sets vma->vm_prev = prev, links prev->vm_next = vma (or mm->mmap = vma when there is no prev), then points vma->vm_next at the old successor and fixes that successor's vm_prev back-pointer; vm_is_stack_for_task(t, vma) checks whether the vma is being used as a stack by the task, i.e. vma->vm_start <= KSTK_ESP(t) and the stack pointer lies below vma->vm_end; task_of_stack(task, vma, in_group) extends the check across a thread group. (further matches elided)
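A self-contained model of that list insertion, with toy struct vma and struct mm types standing in for the kernel ones:

#include <stddef.h>
#include <stdio.h>

struct vma {
    unsigned long vm_start, vm_end;
    struct vma *vm_next, *vm_prev;
};

struct mm { struct vma *mmap; };       /* head of the sorted vma list */

/* Same shape as __vma_link_list(): splice vma in after prev (or at
 * the head when prev is NULL) and patch both neighbours' pointers. */
static void vma_link_list(struct mm *mm, struct vma *vma, struct vma *prev)
{
    struct vma *next;

    vma->vm_prev = prev;
    if (prev) {
        next = prev->vm_next;
        prev->vm_next = vma;
    } else {
        next = mm->mmap;
        mm->mmap = vma;
    }
    vma->vm_next = next;
    if (next)
        next->vm_prev = vma;
}

int main(void)
{
    struct mm mm = { NULL };
    struct vma a = { 0x1000, 0x2000 }, b = { 0x3000, 0x4000 };
    vma_link_list(&mm, &b, NULL);      /* list: b */
    vma_link_list(&mm, &a, NULL);      /* list: a, b */
    for (struct vma *v = mm.mmap; v; v = v->vm_next)
        printf("[%lx, %lx)\n", v->vm_start, v->vm_end);
    return 0;
}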
internal.h — prototypes for __vma_link_list(), __mlock_vma_pages_range(), and munlock_vma_pages_range(); the inline munlock_vma_pages_all(vma) is just munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end); a nearby comment requires the vma's mmap_sem held for read or write, and the page locked; also declared: maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) and further vma-taking helpers. (further matches elided)
mlock.c — comments explain that a vma's VM_LOCKED status must not be concurrently modified (callers hold mmap_sem for read and verify that the vma really is locked), that a munlocked page goes back to the unevictable list if some other vma still has it mlocked, and that munlock_vma_page() handles a page found in a vma that is being munlocked; __mlock_vma_pages_range(vma, ...) mlocks a range of pages in the target vma with vma->vm_mm->mmap_sem held; __munlock_pagevec_fill(pvec, vma, zoneid, start, end), munlock_vma_pages_range(vma, start, end), mlock_fixup(vma, prev, start, end, newflags), and the sys_mlock()/sys_munlock()/sys_mlockall() paths iterate vma and prev. (further matches elided)
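From userspace this is mlock(2)/munlock(2); a small demo (may fail with EPERM or ENOMEM without CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4 * 4096;
    char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) { perror("mmap"); return 1; }

    /* mlock() sets VM_LOCKED on the covering vmas and faults the
     * pages in; they are then exempt from reclaim until munlock(). */
    if (mlock(buf, len) != 0) {
        perror("mlock");
    } else {
        memset(buf, 0, len);           /* already resident: no faults */
        munlock(buf, len);             /* clears VM_LOCKED again */
    }
    munmap(buf, len);
    return 0;
}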
swap.c — lru_cache_add_active_or_unevictable(page, vma) uses the vma in which the page is mapped to determine reclaimability: when (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED the page goes onto the active LRU, otherwise it is treated as unevictable.
filemap.c — do_sync_mmap_readahead(vma, ...) skips readahead for VM_RAND_READ vmas and does sequential readahead for VM_SEQ_READ ones; do_async_mmap_readahead(vma, ...) likewise bails on VM_RAND_READ; filemap_fault() (@vma: vma in which the fault was taken) is invoked via the vma operations vector for mmapped files, with vma->vm_mm->mmap_sem held on entry; filemap_map_pages(vma, vmf), filemap_page_mkwrite(vma, vmf), generic_file_mmap(file, vma), and generic_file_readonly_mmap(file, vma) (with stub variants when CONFIG_MMU is off) complete the vma-facing surface. (further matches elided)
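Any read-only file mapping exercises this machinery; a small userspace demo (the path is just an example), where each first touch of a page goes through filemap_fault() and the readahead heuristics above:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/etc/hostname";
    int fd = open(path, O_RDONLY);
    struct stat st;
    if (fd < 0 || fstat(fd, &st) != 0 || st.st_size == 0) {
        perror(path);
        return 1;
    }

    /* generic_file_readonly_mmap() installs the fault handler. */
    char *map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (map == MAP_FAILED) { perror("mmap"); return 1; }

    fwrite(map, 1, st.st_size, stdout);  /* faults pages in on demand */
    munmap(map, st.st_size);
    close(fd);
    return 0;
}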
memory-failure.c — add_to_kill(tsk, p, vma, to_kill, tkc) records the user address via tk->addr = page_address_in_vma(p, vma); collect_procs_anon() walks the anon_vma chains, takes vma = vmac->vma, skips vmas where page_mapped_in_vma(page, vma) fails, and add_to_kill()s when vma->vm_mm == t->mm; collect_procs_file() does the same over vma_interval_tree_foreach(). (further matches elided)
migrate.c — remove_migration_pte(new, vma, ...) takes mm = vma->vm_mm, locks a huge pte via huge_pte_lockptr(hstate_vma(vma), mm, ptep) for hugetlb, rebuilds the entry with pte_mkold(mk_pte(new, vma->vm_page_prot)), maybe_mkwrite(pte, vma), and arch_make_huge_pte(pte, vma, new, 0), re-adds the rmap via hugepage_add_anon_rmap(new, vma, addr) or page_add_anon_rmap(new, vma, addr), and finishes with update_mmu_cache(vma, addr, ptep); a comment notes that mm/fremap.c's remap_file_pages() accepts any range within a single vma; also migration_entry_wait_huge(vma, mm, pte), the move_pages() paths that find_vma() each target address, migrate_misplaced_page(page, vma, node), and migrate_misplaced_transhuge_page(). (further matches elided)
rmap.c — anon_vma_chain_link(vma, avc, ...) sets avc->vma = vma and list_add()s the chain onto vma->anon_vma_chain; anon_vma_prepare(vma) makes sure the memory mapping described by the vma has an anon_vma ready (attached lazily, since the typical reason for splitting a vma has been mprotect(); anon-vma allocation is subtle and depends on RCU against concurrently allocated vmas); anon_vma_fork(vma, pvma) and unlink_anon_vmas(vma) manage the hierarchy; __vma_address(page, vma), vma_address(), page_address_in_vma(), and page_mapped_in_vma() map a page to its address within a vma; page_referenced_one(), invalid_page_referenced_vma(), page_mkclean_one(), invalid_mkclean_vma(), page_move_anon_rmap(), __page_set_anon_rmap()/__page_check_anon_rmap(), page_add_anon_rmap()/do_page_add_anon_rmap()/page_add_new_anon_rmap(), try_to_unmap_one(), try_to_unmap_cluster(), is_vma_temporary_stack(), invalid_migration_vma(), the rmap_walk helpers, and the hugepage rmap variants all take a vma. (further matches elided)
huge_memory.c — a comment notes the collapse would have happened if the vma was large enough during page fault; maybe_pmd_mkwrite(pmd, vma) adds write permission when vma->vm_flags & VM_WRITE; __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page) builds entry = mk_huge_pmd(page, vma->vm_page_prot), applies maybe_pmd_mkwrite(pmd_mkdirty(entry), vma), and calls page_add_new_anon_rmap(page, vma, haddr) and lru_cache_add_active_or_unevictable(page, vma); alloc_hugepage_vma() allocates at HPAGE_PMD_ORDER for the vma; further vma-keyed entry points include set_huge_zero_page(), do_huge_pmd_anonymous_page(), copy_huge_pmd(), huge_pmd_set_accessed(), do_huge_pmd_wp_page() and its fallback, follow_trans_huge_pmd(), do_huge_pmd_numa_page(), zap_huge_pmd(), mincore_huge_pmd(), move_huge_pmd(), change_huge_pmd(), __pmd_trans_huge_lock(), the __split_huge_page_* helpers, hugepage_madvise(), khugepaged_enter_vma_merge(), __collapse_huge_page_isolate()/__collapse_huge_page_copy(), khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int node), hugepage_vma_check(), collapse_huge_page(), khugepaged_scan_pmd(), __split_huge_zero_page_pmd(), __split_huge_page_pmd(), and __vma_adjust_trans_huge(). (further matches elided)
ksm.c — break_ksm(vma, addr) takes great care to touch only a ksm page, in a VM_MERGEABLE vma: it follow_page()s with FOLL_GET | FOLL_MIGRATION, then calls handle_mm_fault(vma->vm_mm, vma, addr, ...) so that COW is broken, even if the vma does not permit VM_WRITE; several helpers find_vma(mm, addr) and validate the result; unmerge_ksm_pages(vma, start, end), write_protect_page(vma, page, orig_pte), replace_page(vma, page, kpage, orig_pte), try_to_merge_one_page(vma, page, kpage), ksm_madvise(vma, start, end, advice, vm_flags), and ksm_might_need_to_copy(page, vma, address) all operate on a vma. (further matches elided)
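Userspace opts a range into KSM via madvise(2); a minimal demo (requires CONFIG_KSM, and deduplication only actually happens while ksmd is running, i.e. /sys/kernel/mm/ksm/run is 1):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 16 * 4096;
    char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) { perror("mmap"); return 1; }
    memset(buf, 'k', len);             /* identical content in every page */

    /* MADV_MERGEABLE lands in ksm_madvise(), which sets VM_MERGEABLE
     * so ksmd may merge the duplicate pages behind our back. */
    if (madvise(buf, len, MADV_MERGEABLE) != 0)
        perror("madvise(MADV_MERGEABLE)");
    else
        puts("range registered with KSM");
    munmap(buf, len);
    return 0;
}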
memory.c — free_pgtables(tlb, vma, ...) walks the vma list from vma->vm_next, hiding each vma from rmap and truncate_pagecache (unlink_anon_vmas(vma), unlink_file_vma(vma)) before freeing its page tables, with hugetlb_free_pgd_range() for hugetlb vmas; further vma-taking matches cover the whole fault and teardown surface: __pte_alloc(), print_bad_pte(), vm_normal_page(); the fork path copy_one_pte()/copy_pte_range()/copy_pmd_range()/copy_pud_range()/copy_page_range(); the teardown path zap_pte_range()/zap_pmd_range()/zap_pud_range()/unmap_page_range()/unmap_single_vma()/unmap_vmas()/zap_page_range()/zap_page_range_single()/zap_vma_ptes(); the insertion helpers insert_page()/vm_insert_page(), insert_pfn()/vm_insert_pfn()/vm_insert_mixed(), remap_pfn_range(), and vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); cow_user_page()/do_page_mkwrite(), unmap_mapping_range_vma(), do_swap_page(), check_stack_guard_page(), do_anonymous_page(), __do_fault()/do_set_pte()/do_fault_around()/do_read_fault()/do_cow_fault()/do_shared_fault()/do_linear_fault()/do_nonlinear_fault(), the NUMA helpers numa_migrate_prep()/do_numa_page(), the dispatchers handle_pte_fault()/__handle_mm_fault()/handle_mm_fault(), follow_pfn()/follow_phys()/generic_access_phys(), and the huge-page copy helpers copy_user_gigantic_page()/copy_user_huge_page(). (further matches elided)
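The fork path (copy_page_range()) and the fault path (handle_mm_fault()) combine into the familiar copy-on-write behaviour, which is observable from userspace:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    /* Private anonymous memory: fork() shares the pages read-only
     * (copy-on-write), courtesy of copy_page_range(). */
    char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) { perror("mmap"); return 1; }
    strcpy(buf, "parent data");

    pid_t pid = fork();
    if (pid == 0) {
        /* The child's first write faults; the fault handler gives the
         * child its own copy, leaving the parent's page untouched. */
        strcpy(buf, "child data");
        printf("child sees:  %s\n", buf);
        _exit(0);
    }
    waitpid(pid, NULL, 0);
    printf("parent sees: %s\n", buf);  /* still "parent data" */
    munmap(buf, 4096);
    return 0;
}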
mempolicy.c — mpol_rebind_mm() rebinds each vma in an mm to a new nodemask by iterating for (vma = mm->mmap; vma; vma = vma->vm_next) and calling mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE); queue_pages_pte_range(vma, pmd, ...) maps the ptes with pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl) and resolves pages via vm_normal_page(vma, addr, ...); the queue_pages_hugetlb_pmd_range()/queue_pages_pmd_range()/queue_pages_pud_range()/queue_pages_pgd_range() walkers, change_prot_numa() (with a stub when NUMA balancing is off), the do_mbind() vma/prev iteration, vma_replace_policy(vma, pol), __get_vma_policy()/get_vma_policy()/vma_policy_mof(), offset_il_node()/interleave_nid()/huge_zonelist(), alloc_pages_vma(gfp, order, vma, addr, node), mpol_misplaced(page, vma, addr), and mpol_set_shared_policy() complete the vma-aware policy code. (further matches elided)
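Userspace reaches do_mbind() through the mbind(2) syscall; a sketch using the libnuma header (compile with -lnuma; needs a NUMA-enabled kernel, and binding to node 0 is just an example):

#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 1 << 20;
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    unsigned long nodemask = 1;        /* bit 0 set: node 0 only */
    /* Kernel-side this walks the range's vmas and installs the policy
     * via the do_mbind()/vma_replace_policy() path listed above. */
    if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0) != 0)
        perror("mbind");
    memset(p, 0, len);                 /* pages allocated under the policy */
    munmap(p, len);
    return 0;
}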