Searched refs:new_vma (Results 1 - 3 of 3) sorted by relevance

/mm/
mremap.c
   92 struct vm_area_struct *new_vma, pmd_t *new_pmd,
113 * - During mremap(), new_vma is often known to be placed after vma
146 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
165 unsigned long old_addr, struct vm_area_struct *new_vma,
203 err = move_huge_pmd(vma, new_vma, old_addr,
217 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
226 new_vma, new_pmd, new_addr, need_rmap_locks);
242 struct vm_area_struct *new_vma;
264 * so KSM can come around to merge on vma and new_vma afterward
 90 move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr, bool need_rmap_locks)
164 move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, unsigned long new_addr, unsigned long len, bool need_rmap_locks)
[all...]
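The hits above outline the core of mremap()'s page-table move: move_page_tables() walks the old range in PMD-sized steps, hands whole huge PMDs to move_huge_pmd(), allocates destination page tables with __pte_alloc() when needed, and falls back to move_ptes() for the rest. Below is a minimal userspace sketch of that walk, assuming 2 MiB PMDs; every name, constant, and helper in it is an illustrative stand-in, not the kernel's code, which also takes locks and flushes TLBs.

/*
 * Simplified model of the move_page_tables() walk seen in the hits
 * above. Types, constants and helpers are stand-ins for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE (2UL * 1024 * 1024)   /* stand-in: 2 MiB PMDs */
#define PMD_MASK (~(PMD_SIZE - 1))

/* Stand-in for move_huge_pmd(): succeeds only on aligned, full PMDs. */
static bool try_move_huge_pmd(unsigned long old_addr, unsigned long new_addr,
                              unsigned long extent)
{
    return extent == PMD_SIZE &&
           (old_addr & ~PMD_MASK) == 0 && (new_addr & ~PMD_MASK) == 0;
}

/* Stand-in for move_ptes(): relocate the PTEs covering one sub-range. */
static void move_ptes_range(unsigned long old_addr, unsigned long new_addr,
                            unsigned long extent, bool need_rmap_locks)
{
    printf("ptes  %#lx -> %#lx (%lu bytes, rmap locks: %s)\n",
           old_addr, new_addr, extent, need_rmap_locks ? "yes" : "no");
}

/* Walk the old range PMD by PMD, preferring whole huge-PMD moves. */
static unsigned long move_page_tables_model(unsigned long old_addr,
                                            unsigned long new_addr,
                                            unsigned long len,
                                            bool need_rmap_locks)
{
    unsigned long old_end = old_addr + len, moved = 0;

    while (old_addr < old_end) {
        /* Advance to the next PMD boundary, clamped to the end. */
        unsigned long next = (old_addr + PMD_SIZE) & PMD_MASK;
        unsigned long extent = (next > old_end ? old_end : next) - old_addr;

        if (try_move_huge_pmd(old_addr, new_addr, extent))
            printf("huge  %#lx -> %#lx\n", old_addr, new_addr);
        else
            move_ptes_range(old_addr, new_addr, extent, need_rmap_locks);

        old_addr += extent;
        new_addr += extent;
        moved += extent;
    }
    return moved;
}

int main(void)
{
    /* Move 5 MiB from 4 MiB to 16 MiB: two huge moves, then a PTE tail. */
    move_page_tables_model(4UL << 20, 16UL << 20, 5UL << 20, false);
    return 0;
}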
mmap.c
 2844 struct vm_area_struct *new_vma, *prev;
2859 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2862 if (new_vma) {
2864 * Source vma may have been merged into new_vma
2866 if (unlikely(vma_start >= new_vma->vm_start &&
2867 vma_start < new_vma->vm_end)) {
2880 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
2881 *vmap = vma = new_vma;
2883 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
2885 new_vma
[all...]
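These mmap.c hits come from copy_vma(). After vma_merge() the source vma may itself have been merged into new_vma, which the unlikely() range test at lines 2866-2867 detects; the function also computes need_rmap_locks as new_vma->vm_pgoff <= vma->vm_pgoff, matching the "new_vma is often known to be placed after vma" comment from mremap.c. A small sketch of just those two predicates, with stand-in types:

#include <stdbool.h>

struct vm_area_struct {
    unsigned long vm_start, vm_end;
    unsigned long vm_pgoff;   /* offset into the backing file, in pages */
};

/* Did vma_merge() fold the source vma into new_vma? True when the old
 * start address now falls inside new_vma. */
static bool source_was_merged(const struct vm_area_struct *new_vma,
                              unsigned long vma_start)
{
    return vma_start >= new_vma->vm_start && vma_start < new_vma->vm_end;
}

/* Rmap locks are skipped only when the copy lands strictly after the
 * original in file-offset order (the common mremap case). */
static bool compute_need_rmap_locks(const struct vm_area_struct *new_vma,
                                    const struct vm_area_struct *vma)
{
    return new_vma->vm_pgoff <= vma->vm_pgoff;
}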
huge_memory.c
 1446 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1460 (new_vma->vm_flags & VM_NOHUGEPAGE))
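Line 1460 is the tail of move_huge_pmd()'s entry guard: in kernels of this vintage the move is attempted only when both addresses are huge-page aligned, at least one full huge page remains in the old range, and the destination vma has not opted out of transparent huge pages with VM_NOHUGEPAGE. A hedged model of that guard, with stand-in constants:

#include <stdbool.h>

#define HPAGE_PMD_SIZE (2UL * 1024 * 1024)      /* stand-in: 2 MiB */
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
#define VM_NOHUGEPAGE  0x40000000UL             /* stand-in flag value */

struct vm_area_struct { unsigned long vm_flags; };

/* Model of move_huge_pmd()'s entry guard, as sketched above. */
static bool can_move_huge_pmd(const struct vm_area_struct *new_vma,
                              unsigned long old_addr, unsigned long new_addr,
                              unsigned long old_end)
{
    if ((old_addr & ~HPAGE_PMD_MASK) || (new_addr & ~HPAGE_PMD_MASK))
        return false;                    /* not PMD-aligned */
    if (old_end - old_addr < HPAGE_PMD_SIZE)
        return false;                    /* less than one huge page left */
    if (new_vma->vm_flags & VM_NOHUGEPAGE)
        return false;                    /* destination opted out of THP */
    return true;
}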
