Lines Matching defs:vma (mm/mprotect.c)

39 static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
49 pmdl = pmd_lock(vma->vm_mm, pmd);
55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
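The three matches above (lines 39-55) are from lock_pte_protection(). A reconstruction of the whole helper, assuming the mainline mm/mprotect.c of roughly this era; the exact checks taken under the pmd lock are recalled rather than quoted, so this tree may differ slightly:

static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,
				  unsigned long addr, int prot_numa,
				  spinlock_t **ptl)
{
	pte_t *pte;
	spinlock_t *pmdl;

	/* !prot_numa is protected by mmap_sem held for write */
	if (!prot_numa)
		return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);

	/*
	 * For NUMA hinting, take the pmd lock first so a parallel THP
	 * split or zap cannot pull the page table out from under the
	 * pte walk.
	 */
	pmdl = pmd_lock(vma->vm_mm, pmd);
	if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {
		spin_unlock(pmdl);
		return NULL;
	}

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
	spin_unlock(pmdl);
	return pte;
}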
60 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
64 struct mm_struct *mm = vma->vm_mm;
69 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
91 !(vma->vm_flags & VM_SOFTDIRTY)))
98 page = vm_normal_page(vma, addr, oldpte);
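Matches 60 through 98 are from change_pte_range(), the leaf of the walk. A condensed sketch of its loop under the same assumptions: the swap/migration branch is reduced to a comment and the version-specific NUMA-hinting pte update is elided, but the dirty-accounting test (line 91) and the vm_normal_page() filter (line 98) sit where the matches place them:

	pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
	if (!pte)
		return 0;

	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			if (!prot_numa) {
				ptent = ptep_modify_prot_start(mm, addr, pte);
				ptent = pte_modify(ptent, newprot);

				/* Avoid write faults for pages known to be dirty */
				if (dirty_accountable && pte_dirty(ptent) &&
				    (pte_soft_dirty(ptent) ||
				     !(vma->vm_flags & VM_SOFTDIRTY)))
					ptent = pte_mkwrite(ptent);

				ptep_modify_prot_commit(mm, addr, pte, ptent);
				pages++;
			} else {
				struct page *page;

				/* NUMA hinting: only normal, non-KSM pages */
				page = vm_normal_page(vma, addr, oldpte);
				if (page && !PageKsm(page)) {
					/* version-specific pte update elided */
					pages++;
				}
			}
		}
		/* swap/migration entries are handled here in the real loop */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);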
133 static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
138 struct mm_struct *mm = vma->vm_mm;
160 split_huge_page_pmd(vma, addr, pmd);
162 int nr_ptes = change_huge_pmd(vma, pmd, addr,
177 this_pages = change_pte_range(vma, pmd, addr, next, newprot,
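change_pmd_range() (matches 133-177) walks the pmd entries and special-cases transparent huge pages before dropping to the pte level. A trimmed sketch of that decision, under the same assumptions (the mmu_notifier start/end calls in the real function are left out):

	pmd = pmd_offset(pud, addr);
	do {
		unsigned long this_pages;

		next = pmd_addr_end(addr, end);
		if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))
			continue;

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
						newprot, prot_numa);

				/* nonzero: the whole huge pmd was handled */
				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR)
						pages += HPAGE_PMD_NR;
					continue;
				}
			}
			/* fall through: the huge pmd was just split */
		}
		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
				dirty_accountable, prot_numa);
		pages += this_pages;
	} while (pmd++, addr = next, addr != end);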
190 static inline unsigned long change_pud_range(struct vm_area_struct *vma,
203 pages += change_pmd_range(vma, pud, addr, next, newprot,
210 static unsigned long change_protection_range(struct vm_area_struct *vma,
214 struct mm_struct *mm = vma->vm_mm;
222 flush_cache_range(vma, addr, end);
228 pages += change_pud_range(vma, pgd, addr, next, newprot,
234 flush_tlb_range(vma, start, end);
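change_protection_range() (matches 210-234) is the top of the walk: it flushes the cache range once up front, iterates the pgd entries (change_pud_range() at line 190 repeats the same pattern one level down), and only issues the TLB flush when something was actually changed. A sketch, assuming the same source:

static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;
	unsigned long pages = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	set_tlb_flush_pending(mm);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pages += change_pud_range(vma, pgd, addr, next, newprot,
				dirty_accountable, prot_numa);
	} while (pgd++, addr = next, addr != end);

	/* Only flush the TLB if we actually modified any entries */
	if (pages)
		flush_tlb_range(vma, start, end);
	clear_tlb_flush_pending(mm);

	return pages;
}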
240 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
246 if (is_vm_hugetlb_page(vma))
247 pages = hugetlb_change_protection(vma, start, end, newprot);
249 pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
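change_protection() (matches 240-249) is only a dispatcher between the hugetlbfs path and the regular walk; a reconstruction along the lines of the matched fragments:

unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	unsigned long pages;

	if (is_vm_hugetlb_page(vma))
		pages = hugetlb_change_protection(vma, start, end, newprot);
	else
		pages = change_protection_range(vma, start, end, newprot,
						dirty_accountable, prot_numa);

	return pages;
}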
255 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
258 struct mm_struct *mm = vma->vm_mm;
259 unsigned long oldflags = vma->vm_flags;
267 *pprev = vma;
288 * First try to merge with previous and/or next vma.
290 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
292 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
293 vma_get_anon_name(vma));
295 vma = *pprev;
299 *pprev = vma;
301 if (start != vma->vm_start) {
302 error = split_vma(mm, vma, start, 1);
307 if (end != vma->vm_end) {
308 error = split_vma(mm, vma, end, 0);
318 vma->vm_flags = newflags;
319 dirty_accountable = vma_wants_writenotify(vma);
320 vma_set_page_prot(vma);
322 change_protection(vma, start, end, vma->vm_page_prot,
325 vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
326 vm_stat_account(mm, newflags, vma->vm_file, nrpages);
327 perf_event_mmap(vma);
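The mprotect_fixup() matches (255-327) are the vma bookkeeping around the walk: try to merge with the neighbours, split at unaligned boundaries, then update vm_flags and push the new protection down. A heavily condensed sketch of that sequence under the same assumptions; error unwinding and the accounting of newly writable private mappings are omitted, and vma_get_anon_name() is the Android anon-name extension visible at line 293:

	/* First try to merge with previous and/or next vma. */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
			   vma_get_anon_name(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/* vm_flags and vm_page_prot are protected by mmap_sem held for write */
	vma->vm_flags = newflags;
	dirty_accountable = vma_wants_writenotify(vma);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);

	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	perf_event_mmap(vma);
	return 0;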
339 struct vm_area_struct *vma, *prev;
368 vma = find_vma(current->mm, start);
370 if (!vma)
372 prev = vma->vm_prev;
374 if (vma->vm_start >= end)
376 start = vma->vm_start;
378 if (!(vma->vm_flags & VM_GROWSDOWN))
381 if (vma->vm_start > start)
384 end = vma->vm_end;
386 if (!(vma->vm_flags & VM_GROWSUP))
390 if (start > vma->vm_start)
391 prev = vma;
396 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
399 newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
407 error = security_file_mprotect(vma, reqprot, prot);
411 tmp = vma->vm_end;
414 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
424 vma = prev->vm_next;
425 if (!vma || vma->vm_start != nstart) {
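The remaining matches (339-425) come from the mprotect() syscall body, which resolves the first vma and then fixes up one vma at a time until the requested range is covered. A trimmed sketch of that loop, again assuming the same file; the PROT_GROWSDOWN/PROT_GROWSUP handling visible at lines 378 and 386 is compressed into a comment:

	vma = find_vma(current->mm, start);
	error = -ENOMEM;
	if (!vma)
		goto out;
	prev = vma->vm_prev;
	/* ... PROT_GROWSDOWN / PROT_GROWSUP adjust start/end here ... */
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags;	/* computed from prot before the loop */
		newflags |= (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;

		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;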