Searched defs:vm_flags (Results 1 - 12 of 12) sorted by relevance

/mm/
fremap.c
150 vm_flags_t vm_flags = 0; local
178 /* We need down_write() to change vma->vm_flags. */
188 if (!vma || !(vma->vm_flags & VM_SHARED))
198 if (!(vma->vm_flags & VM_NONLINEAR)) {
229 vm_flags = vma->vm_flags;
231 addr = mmap_region(file, start, size, vm_flags, pgoff);
243 vma->vm_flags |= VM_NONLINEAR;
250 if (vma->vm_flags & VM_LOCKED) {
256 vm_flags
[all...]
gup.c
95 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
169 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
181 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
326 if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
333 vm_flags_t vm_flags = vma->vm_flags; local
335 if (vm_flags & (VM_IO | VM_PFNMAP))
339 if (!(vm_flags & VM_WRITE)) {
351 if (!is_cow_mapping(vm_flags)) {
352 WARN_ON_ONCE(vm_flags
557 vm_flags_t vm_flags; local
[all...]
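
The gup.c hits above all gate get_user_pages on the VMA's flags: mappings marked VM_IO or VM_PFNMAP are rejected outright, and a write request needs VM_WRITE. Below is a minimal userspace sketch of that gating; the flag values are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

typedef unsigned long vm_flags_t;

#define VM_WRITE   0x00000002UL   /* illustrative values */
#define VM_SHARED  0x00000008UL
#define VM_PFNMAP  0x00000400UL
#define VM_IO      0x00004000UL

/* Roughly the policy visible in the gup.c excerpt: no pinning of
 * VM_IO/VM_PFNMAP mappings, and writes require a writable VMA. */
static int may_access(vm_flags_t vm_flags, int want_write)
{
        if (vm_flags & (VM_IO | VM_PFNMAP))
                return 0;
        if (want_write && !(vm_flags & VM_WRITE))
                return 0;
        return 1;
}

int main(void)
{
        printf("write, shared+write vma: %d\n", may_access(VM_SHARED | VM_WRITE, 1));
        printf("write, read-only vma:    %d\n", may_access(VM_SHARED, 1));
        printf("read,  VM_PFNMAP vma:    %d\n", may_access(VM_PFNMAP, 0));
        return 0;
}
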
mprotect.c
91 !(vma->vm_flags & VM_SOFTDIRTY)))
259 unsigned long oldflags = vma->vm_flags;
315 * vm_flags and vm_page_prot are protected by the mmap_sem
318 vma->vm_flags = newflags;
338 unsigned long vm_flags, nstart, end, tmp, reqprot; local
364 vm_flags = calc_vm_prot_bits(prot);
378 if (!(vma->vm_flags & VM_GROWSDOWN))
386 if (!(vma->vm_flags & VM_GROWSUP))
398 newflags = vm_flags;
399 newflags |= (vma->vm_flags
[all...]
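
The mprotect.c excerpt follows the usual pattern: calc_vm_prot_bits() turns the PROT_* argument into VM_READ/VM_WRITE/VM_EXEC bits, and the new vm_flags keep every bit of the old flags outside that protection mask. A small sketch under those assumptions; calc_vm_prot_bits() here is a simplified stand-in and the VM_* values are illustrative.

#include <stdio.h>
#include <sys/mman.h>        /* PROT_READ / PROT_WRITE / PROT_EXEC */

#define VM_READ   0x1UL      /* illustrative values */
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_LOCKED 0x2000UL   /* example of a bit mprotect() must preserve */

/* Simplified stand-in for the kernel helper of the same name. */
static unsigned long calc_vm_prot_bits(unsigned long prot)
{
        unsigned long flags = 0;

        if (prot & PROT_READ)  flags |= VM_READ;
        if (prot & PROT_WRITE) flags |= VM_WRITE;
        if (prot & PROT_EXEC)  flags |= VM_EXEC;
        return flags;
}

int main(void)
{
        unsigned long old = VM_READ | VM_WRITE | VM_LOCKED;
        unsigned long newflags;

        /* mprotect(PROT_READ): drop VM_WRITE but keep VM_LOCKED */
        newflags = calc_vm_prot_bits(PROT_READ);
        newflags |= old & ~(VM_READ | VM_WRITE | VM_EXEC);

        printf("old=%#lx new=%#lx\n", old, newflags);
        return 0;
}
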
mremap.c
243 unsigned long vm_flags = vma->vm_flags; local
263 * pages recently unmapped. But leave vma->vm_flags as it was,
267 MADV_UNMERGEABLE, &vm_flags);
294 if (vm_flags & VM_ACCOUNT) {
295 vma->vm_flags &= ~VM_ACCOUNT;
312 vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
323 vma->vm_flags |= VM_ACCOUNT;
325 vma->vm_next->vm_flags |= VM_ACCOUNT;
328 if (vm_flags
[all...]
rmap.c
553 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
670 unsigned long vm_flags; member in struct:page_referenced_arg
696 if (vma->vm_flags & VM_LOCKED) {
698 pra->vm_flags |= VM_LOCKED;
717 if (vma->vm_flags & VM_LOCKED) {
719 pra->vm_flags |= VM_LOCKED;
731 if (likely(!(vma->vm_flags & VM_SEQ_READ)))
739 pra->vm_flags |= vma->vm_flags;
765 * @vm_flags
770 page_referenced(struct page *page, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags) argument
[all...]
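
In the rmap.c hits, page_referenced() walks every VMA mapping the page and reports not only a reference count but also the union of the mappers' vm_flags through struct page_referenced_arg; a VM_LOCKED mapping stops the walk, and VM_SEQ_READ references are not counted. The toy model below covers one step of that walk, with assumed flag values.

#include <stdio.h>

#define VM_EXEC     0x4UL      /* illustrative values */
#define VM_LOCKED   0x2000UL
#define VM_SEQ_READ 0x8000UL

struct page_referenced_arg {
        int referenced;
        unsigned long vm_flags;   /* union of flags of referencing VMAs */
};

/* One step of the rmap walk for a single VMA; returns 0 to stop the walk. */
static int one_vma(unsigned long vma_flags, int young,
                   struct page_referenced_arg *pra)
{
        if (vma_flags & VM_LOCKED) {
                pra->vm_flags |= VM_LOCKED;   /* tell the caller the page is mlocked */
                return 0;
        }
        if (young && !(vma_flags & VM_SEQ_READ))
                pra->referenced++;
        pra->vm_flags |= vma_flags;
        return 1;
}

int main(void)
{
        struct page_referenced_arg pra = { 0, 0 };

        one_vma(VM_EXEC, 1, &pra);       /* referenced through an exec mapping */
        one_vma(VM_SEQ_READ, 1, &pra);   /* sequential-read mapping: not counted */

        printf("referenced=%d vm_flags=%#lx (VM_EXEC set: %d)\n",
               pra.referenced, pra.vm_flags, !!(pra.vm_flags & VM_EXEC));
        return 0;
}
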
huge_memory.c
700 if (likely(vma->vm_flags & VM_WRITE))
806 if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
1246 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1460 (new_vma->vm_flags & VM_NOHUGEPAGE))
1948 unsigned long *vm_flags, int advice)
1964 if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
1966 *vm_flags &= ~VM_NOHUGEPAGE;
1967 *vm_flags |= VM_HUGEPAGE;
1973 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
1980 if (*vm_flags
1947 hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) argument
2074 khugepaged_enter_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags) argument
[all...]
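
hugepage_madvise() in the huge_memory.c entry flips the two THP hint bits on the VMA: MADV_HUGEPAGE clears VM_NOHUGEPAGE and sets VM_HUGEPAGE, while MADV_NOHUGEPAGE does the opposite (the kernel also rejects VMAs it cannot use THP on, which is omitted here). A sketch with placeholder VM_* values follows.

#include <stdio.h>
#include <sys/mman.h>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE   14
#define MADV_NOHUGEPAGE 15
#endif

#define VM_HUGEPAGE   0x01000000UL   /* placeholder value */
#define VM_NOHUGEPAGE 0x02000000UL   /* placeholder value */

/* Toggle the two hint bits the way the excerpt above does. */
static int hugepage_madvise_model(unsigned long *vm_flags, int advice)
{
        switch (advice) {
        case MADV_HUGEPAGE:
                *vm_flags &= ~VM_NOHUGEPAGE;
                *vm_flags |= VM_HUGEPAGE;
                return 0;
        case MADV_NOHUGEPAGE:
                *vm_flags &= ~VM_HUGEPAGE;
                *vm_flags |= VM_NOHUGEPAGE;
                return 0;
        }
        return -1;
}

int main(void)
{
        unsigned long flags = VM_NOHUGEPAGE;

        hugepage_madvise_model(&flags, MADV_HUGEPAGE);
        printf("flags=%#lx huge=%d nohuge=%d\n", flags,
               !!(flags & VM_HUGEPAGE), !!(flags & VM_NOHUGEPAGE));
        return 0;
}
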
ksm.c
420 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
786 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
1024 if (!(vma->vm_flags & VM_MERGEABLE))
1061 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
1607 if (!(vma->vm_flags & VM_MERGEABLE))
1739 unsigned long end, int advice, unsigned long *vm_flags)
1749 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
1755 if (*vm_flags & VM_SAO)
1765 *vm_flags |= VM_MERGEABLE;
1769 if (!(*vm_flags
1738 ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) argument
[all...]
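
ksm_madvise() in the ksm.c entry behaves similarly for MADV_MERGEABLE/MADV_UNMERGEABLE: mappings whose vm_flags mark them as unsuitable (shared, I/O, and so on) are left untouched, otherwise VM_MERGEABLE is set or cleared. The minimal model below uses an illustrative incompatibility mask and flag values, not the kernel's full list.

#include <stdio.h>

#define VM_SHARED    0x00000008UL   /* illustrative values */
#define VM_MAYSHARE  0x00000080UL
#define VM_PFNMAP    0x00000400UL
#define VM_IO        0x00004000UL
#define VM_MERGEABLE 0x80000000UL

#define KSM_INCOMPATIBLE (VM_SHARED | VM_MAYSHARE | VM_PFNMAP | VM_IO)

static int ksm_madvise_model(unsigned long *vm_flags, int mergeable)
{
        if (mergeable) {
                if (*vm_flags & KSM_INCOMPATIBLE)
                        return 0;            /* leave unsuitable VMAs alone */
                *vm_flags |= VM_MERGEABLE;
        } else {
                *vm_flags &= ~VM_MERGEABLE;
        }
        return 0;
}

int main(void)
{
        unsigned long anon = 0, shared = VM_SHARED;

        ksm_madvise_model(&anon, 1);
        ksm_madvise_model(&shared, 1);
        printf("anon mergeable=%d, shared mergeable=%d\n",
               !!(anon & VM_MERGEABLE), !!(shared & VM_MERGEABLE));
        return 0;
}
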
memory.c
688 "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
689 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
758 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
767 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
768 if (vma->vm_flags & VM_MIXEDMAP) {
777 if (!is_cow_mapping(vma->vm_flags))
809 unsigned long vm_flags = vma->vm_flags; local
840 is_cow_mapping(vm_flags)) {
860 if (is_cow_mapping(vm_flags)) {
[all...]
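
Several hits above (memory.c, gup.c, and hugetlb.c line 2558) rely on the same copy-on-write test: a mapping is CoW when VM_MAYWRITE is set but VM_SHARED is not. Here is a standalone version of that check, with stand-in flag values.

#include <stdio.h>
#include <stdbool.h>

#define VM_SHARED   0x00000008UL   /* illustrative values */
#define VM_MAYWRITE 0x00000020UL

/* Copy-on-write mapping: may become writable, but not shared. */
static bool is_cow_mapping(unsigned long flags)
{
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

int main(void)
{
        printf("private writable: %d\n", is_cow_mapping(VM_MAYWRITE));             /* 1 */
        printf("shared writable:  %d\n", is_cow_mapping(VM_SHARED | VM_MAYWRITE)); /* 0 */
        return 0;
}
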
mmap.c
84 pgprot_t vm_get_page_prot(unsigned long vm_flags) argument
86 return __pgprot(pgprot_val(protection_map[vm_flags &
88 pgprot_val(arch_vm_get_page_prot(vm_flags)));
92 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags) argument
94 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
97 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
100 unsigned long vm_flags = vma->vm_flags; local
102 vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
104 vm_flags
930 is_mergeable_vma(struct vm_area_struct *vma, struct file *file, unsigned long vm_flags, const char __user *anon_name) argument
979 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, const char __user *anon_name) argument
999 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff, const char __user *anon_name) argument
1042 vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, const char __user *anon_name) argument
1272 vm_flags_t vm_flags; local
1496 vm_flags_t vm_flags = vma->vm_flags; local
1529 accountable_mapping(struct file *file, vm_flags_t vm_flags) argument
1541 mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) argument
2988 __install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_operations_struct *ops, void *priv) argument
3036 _install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_special_mapping *spec) argument
3045 install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) argument
[all...]
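
The mmap.c entry starts with vm_get_page_prot(), which indexes a protection table with the low read/write/exec/shared bits of vm_flags (plus an architecture hook that the snippet shows OR'd in). The model below mirrors only the indexing scheme; the table contents and flag values are made up for illustration.

#include <stdio.h>

#define VM_READ   0x1UL   /* illustrative values */
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

/* 16-entry table indexed by the four protection-related bits; private
 * writable entries stand for copy-on-write protections. */
static const char *protection_map[16] = {
        "none", "r", "w(cow)", "rw(cow)", "x", "rx", "wx(cow)", "rwx(cow)",
        "none", "r", "w",      "rw",      "x", "rx", "wx",      "rwx",
};

static const char *vm_get_page_prot_model(unsigned long vm_flags)
{
        return protection_map[vm_flags &
                              (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}

int main(void)
{
        printf("private rw -> %s\n",
               vm_get_page_prot_model(VM_READ | VM_WRITE));
        printf("shared  rw -> %s\n",
               vm_get_page_prot_model(VM_READ | VM_WRITE | VM_SHARED));
        return 0;
}
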
nommu.c
155 unsigned long vm_flags; local
161 vm_flags = (foll_flags & FOLL_WRITE) ?
163 vm_flags &= (foll_flags & FOLL_FORCE) ?
172 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
173 !(vm_flags & vma->vm_flags))
229 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
267 vma->vm_flags |= VM_USERMAP;
665 if (region->vm_flags & VM_MAPPED_COPY) {
719 protect_vma(vma, vma->vm_flags);
1093 unsigned long vm_flags; local
1272 unsigned long capabilities, vm_flags, result; local
[all...]
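
The nommu.c excerpt derives a required-flags mask from the FOLL_* request: FOLL_WRITE selects the write bits, FOLL_FORCE relaxes the test from the current VM_READ/VM_WRITE protection to the potential VM_MAYREAD/VM_MAYWRITE one, and the VMA must carry at least one of the required bits. A sketch with illustrative constants:

#include <stdio.h>
#include <stdbool.h>

#define VM_READ     0x01UL   /* illustrative values */
#define VM_WRITE    0x02UL
#define VM_MAYREAD  0x10UL
#define VM_MAYWRITE 0x20UL

#define FOLL_WRITE 0x1
#define FOLL_FORCE 0x2

static bool vma_passes(unsigned long vma_flags, unsigned int foll_flags)
{
        unsigned long want;

        want = (foll_flags & FOLL_WRITE) ? (VM_WRITE | VM_MAYWRITE)
                                         : (VM_READ | VM_MAYREAD);
        want &= (foll_flags & FOLL_FORCE) ? (VM_MAYREAD | VM_MAYWRITE)
                                          : (VM_READ | VM_WRITE);
        return (vma_flags & want) != 0;
}

int main(void)
{
        unsigned long ro = VM_READ | VM_MAYREAD | VM_MAYWRITE;

        printf("write, no force: %d\n", vma_passes(ro, FOLL_WRITE));              /* 0 */
        printf("write, force:    %d\n", vma_passes(ro, FOLL_WRITE | FOLL_FORCE)); /* 1 */
        return 0;
}
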
vmscan.c
762 unsigned long vm_flags; local
765 &vm_flags);
772 if (vm_flags & VM_LOCKED)
800 if (vm_flags & VM_EXEC)
1725 unsigned long vm_flags; local
1776 &vm_flags)) {
1787 if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
hugetlb.c
438 if (vma->vm_flags & VM_MAYSHARE) {
453 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
462 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
478 if (!(vma->vm_flags & VM_MAYSHARE))
485 if (vma->vm_flags & VM_NORESERVE) {
495 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
502 if (vma->vm_flags & VM_MAYSHARE)
1345 if (vma->vm_flags & VM_MAYSHARE)
2558 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2731 vma->vm_flags
3383 hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags) argument
3492 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED; local
[all...]

Completed in 3725 milliseconds