Searched refs:vma (Results 1 - 25 of 332) sorted by relevance


/arch/sparc/include/asm/
tlb_32.h
    4  #define tlb_start_vma(tlb, vma) \
    6          flush_cache_range(vma, vma->vm_start, vma->vm_end); \
    9  #define tlb_end_vma(tlb, vma) \
   11          flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
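
A note on the pattern these first hits share: tlb_start_vma() and tlb_end_vma() are the per-architecture hooks that the generic mmu_gather code wraps around the teardown of each VMA; sparc32 flushes the cache going in and the TLB coming out, while several ports below stub one or both out. A minimal sketch of how the core unmap path drives them (simplified; the real loop lives in mm/memory.c and varies by kernel version):

    /* Sketch: how mmu_gather brackets per-VMA teardown. Simplified
     * from the generic unmap path; details vary by kernel version. */
    static void unmap_one_vma(struct mmu_gather *tlb,
                              struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
    {
            tlb_start_vma(tlb, vma);   /* sparc32: flush_cache_range() */
            unmap_page_range(tlb, vma, start, end, NULL);
            tlb_end_vma(tlb, vma);     /* sparc32: flush_tlb_range() */
    }
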
/arch/avr32/include/asm/
tlb.h
   11  #define tlb_start_vma(tlb, vma) \
   12          flush_cache_range(vma, vma->vm_start, vma->vm_end)
   14  #define tlb_end_vma(tlb, vma) \
   15          flush_tlb_range(vma, vma->vm_start, vma->vm_end)
fb.h
    8  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
   11          vma->vm_page_prot = __pgprot((pgprot_val(vma->vm_page_prot)
/arch/ia64/include/asm/
fb.h
    9  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
   12          if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
   13                  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
   15                  vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
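
fb_pgprotect() is each port's policy hook for the caching attributes of a framebuffer mapping: ia64 asks EFI whether the range tolerates write-combining and falls back to uncached, while arm, sh, and m32r below unconditionally pick write-combining and mips picks uncached. A hedged sketch of the caller side (the real caller is the fbmem mmap path; fb_base and fb_len here are stand-ins for the fb_info fields, and bounds checks are trimmed):

    /* Sketch: applying fb_pgprotect() before mapping framebuffer
     * pages to userspace; error handling trimmed. */
    static int fb_mmap_sketch(struct file *file, struct vm_area_struct *vma,
                              unsigned long fb_base, unsigned long fb_len)
    {
            unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

            fb_pgprotect(file, vma, off);   /* arch picks WC vs. uncached */
            return io_remap_pfn_range(vma, vma->vm_start,
                                      (fb_base + off) >> PAGE_SHIFT,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
    }
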
/arch/xtensa/include/asm/
tlb.h
   21  # define tlb_start_vma(tlb, vma) do { } while (0)
   22  # define tlb_end_vma(tlb, vma) do { } while (0)
   26  # define tlb_start_vma(tlb, vma) \
   29          flush_cache_range(vma, vma->vm_start, vma->vm_end); \
   32  # define tlb_end_vma(tlb, vma) \
   35          flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
[all...]
/arch/mips/include/asm/
tlb.h
    5   * MIPS doesn't need any special per-pte or per-vma handling, except
    8  #define tlb_start_vma(tlb, vma) \
   11          flush_cache_range(vma, vma->vm_start, vma->vm_end); \
   13  #define tlb_end_vma(tlb, vma) do { } while (0)
fb.h
    8  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
   11          vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
tlbflush.h
   11   * - flush_tlb_page(vma, vmaddr) flushes one page
   12   * - flush_tlb_range(vma, start, end) flushes a range of pages
   17  extern void local_flush_tlb_range(struct vm_area_struct *vma,
   21  extern void local_flush_tlb_page(struct vm_area_struct *vma,
   29  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long,
   39  #define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
   42  #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
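
The tlbflush.h comment states the contract every port implements: flush_tlb_page() invalidates one user mapping, flush_tlb_range() a span within one VMA, and on !SMP builds both collapse to the local_ variants, as the #defines at lines 39 and 42 show. A sketch of the canonical caller ordering, assuming a hypothetical set_ptes_readonly() helper for the page-table side (the real callers are paths like mprotect's change_protection()):

    /* Sketch: PTEs change first, then the stale TLB entries are
     * shot down. set_ptes_readonly() is hypothetical. */
    static void make_range_readonly(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end)
    {
            set_ptes_readonly(vma->vm_mm, start, end);  /* hypothetical */
            flush_tlb_range(vma, start, end);
    }
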
/arch/powerpc/include/asm/
fb.h
    8  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
   11          vma->vm_page_prot = phys_mem_access_prot(file, off >> PAGE_SHIFT,
   12                                                   vma->vm_end - vma->vm_start,
   13                                                   vma->vm_page_prot);
/arch/x86/um/
mem_64.c
    5  const char *arch_vma_name(struct vm_area_struct *vma)
    7          if (vma->vm_mm && vma->vm_start == um_vdso_addr)
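
arch_vma_name() is how a port labels special mappings in /proc/<pid>/maps; the hit tests whether the VMA sits at the UML vDSO address. The excerpt elides the returns, but the usual shape of such a function is as follows (the "[vdso]" label is the established convention, hedged here since it is not in the excerpt):

    /* Sketch: the conventional shape of arch_vma_name(); the
     * excerpt above shows only the matching line. */
    const char *arch_vma_name(struct vm_area_struct *vma)
    {
            if (vma->vm_mm && vma->vm_start == um_vdso_addr)
                    return "[vdso]";
            return NULL;
    }
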
/arch/parisc/include/asm/
tlb.h
    9  #define tlb_start_vma(tlb, vma) \
   11          flush_cache_range(vma, vma->vm_start, vma->vm_end); \
   14  #define tlb_end_vma(tlb, vma) \
   16          flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
/arch/m68k/include/asm/
fb.h
   11  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
   14          pgprot_val(vma->vm_page_prot) |= SUN3_PAGE_NOCACHE;
   17  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
   21          pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE030;
   23          pgprot_val(vma->vm_page_prot) &= _CACHEMASK040;
   25          pgprot_val(vma->vm_page_prot) |= _PAGE_NOCACHE_S;
tlb.h
    6   * per-vma handling..
    8  #define tlb_start_vma(tlb, vma) do { } while (0)
    9  #define tlb_end_vma(tlb, vma) do { } while (0)
/arch/blackfin/include/asm/
tlb.h
   10  #define tlb_start_vma(tlb, vma) do { } while (0)
   11  #define tlb_end_vma(tlb, vma) do { } while (0)
/arch/m32r/include/asm/
tlb.h
    6   * per-vma handling..
    8  #define tlb_start_vma(tlb, vma) do { } while (0)
    9  #define tlb_end_vma(tlb, vma) do { } while (0)
fb.h
    8  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
   11          vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
tlbflush.h
   12   * - flush_tlb_page(vma, vmaddr) flushes one page
   13   * - flush_tlb_range(vma, start, end) flushes a range of pages
   27  #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
   28  #define flush_tlb_range(vma, start, end) \
   29          local_flush_tlb_range(vma, start, end)
   34  #define flush_tlb_page(vma, vmaddr) do { } while (0)
   35  #define flush_tlb_range(vma, start, end) do { } while (0)
   46  #define flush_tlb_page(vma, page) smp_flush_tlb_page(vma, page)
[all...]
/arch/sh/include/asm/
tlbflush.h
    9   * - flush_tlb_page(vma, vmaddr) flushes one page
   10   * - flush_tlb_range(vma, start, end) flushes a range of pages
   15  extern void local_flush_tlb_range(struct vm_area_struct *vma,
   18  extern void local_flush_tlb_page(struct vm_area_struct *vma,
   30  extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
   32  extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
   40  #define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
   43  #define flush_tlb_range(vma, start, end) \
   44          local_flush_tlb_range(vma, start, end)
[all...]
fb.h
    8  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
   11          vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
/arch/tile/include/asm/
tlb.h
   18  #define tlb_start_vma(tlb, vma) do { } while (0)
   19  #define tlb_end_vma(tlb, vma) do { } while (0)
tlbflush.h
   41  static inline unsigned long hv_page_size(const struct vm_area_struct *vma)
   43          return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE;
   46  /* Pass as vma pointer for non-executable mapping, if no vma available. */
   50  static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
   58          if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
   63  static inline void local_flush_tlb_pages(const struct vm_area_struct *vma,
   72          if (!vma || (vma != FLUSH_NONEXEC && (vma->vm_flags & VM_EXEC)))
[all...]
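
Two tile idioms are visible here: the flush granule is derived from the VMA (huge pages flush HPAGE_SIZE at a time), and FLUSH_NONEXEC is a sentinel "vma" pointer that callers pass when no real VMA is at hand but the mapping is known non-executable, letting the helper skip the i-TLB. A sketch of both call styles, assuming the parameters elided from the excerpt are the address and page size, as hv_page_size() suggests:

    /* Sketch: tile local TLB flush call styles; the parameter list
     * is an assumption based on the hv_page_size() helper above. */
    static void flush_examples(struct vm_area_struct *vma, unsigned long addr)
    {
            /* Real VMA: granule and the VM_EXEC test come from it. */
            local_flush_tlb_page(vma, addr, hv_page_size(vma));

            /* No VMA available, mapping known non-executable. */
            local_flush_tlb_page(FLUSH_NONEXEC, addr, PAGE_SIZE);
    }
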
/arch/x86/include/asm/
tlb.h
    4  #define tlb_start_vma(tlb, vma) do { } while (0)
    5  #define tlb_end_vma(tlb, vma) do { } while (0)
/arch/arm/include/asm/
fb.h
    8  static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
   11          vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
/arch/alpha/include/asm/
cacheflush.h
   10  #define flush_cache_range(vma, start, end) do { } while (0)
   11  #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
   50  flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
   53          if (vma->vm_flags & VM_EXEC) {
   54                  struct mm_struct *mm = vma->vm_mm;
   62  extern void flush_icache_user_range(struct vm_area_struct *vma,
   67  #define flush_icache_page(vma, page) \
   68          flush_icache_user_range((vma), (page), 0, 0)
   70  #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
   72          flush_icache_user_range(vma, page, vaddr, len); \
[all...]
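
The alpha hits are about i-cache coherence rather than the d-side: the data cache needs no range flushing (lines 10-11 are no-ops), but any store into a page that might be executed (VM_EXEC) must be followed by an i-cache flush, which is why copy_to_user_page() pairs the copy with flush_icache_user_range(). A sketch of the ptrace-style write path that relies on this, with kaddr standing in for the kernel mapping of the target page and the names illustrative:

    /* Sketch: keeping the i-cache coherent when poking text pages,
     * e.g. to plant a breakpoint. Names are illustrative. */
    static void poke_text(struct vm_area_struct *vma, struct page *page,
                          unsigned long vaddr, void *kaddr,
                          const void *insn, int len)
    {
            /* copy + flush_icache_user_range() in one step */
            copy_to_user_page(vma, page, vaddr, kaddr, insn, len);
    }
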
/arch/frv/mm/
elf-fdpic.c
   62          struct vm_area_struct *vma;
   75          vma = find_vma(current->mm, addr);
   77              (!vma || addr + len <= vma->vm_start))
   88          vma = find_vma(current->mm, PAGE_SIZE);
   89          for (; vma; vma = vma->vm_next) {
   92                  if (addr + len <= vma->vm_start)
   94                  addr = vma->vm_end;
[all...]
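
This last hit is the classic first-fit arch_get_unmapped_area() scan: try the caller's hint first (lines 75-77), then walk the VMA list in address order and take the first gap of at least len between the running address and the next vma->vm_start. A condensed sketch of the loop, using the old vm_next-linked list that the excerpt reflects (bounds and alignment checks omitted):

    /* Sketch: first-fit free-range search over the legacy
     * vm_next-linked VMA list; limit checks omitted. */
    static unsigned long first_fit(unsigned long addr, unsigned long len)
    {
            struct vm_area_struct *vma = find_vma(current->mm, addr);

            for (; vma; vma = vma->vm_next) {
                    if (addr + len <= vma->vm_start)
                            return addr;            /* gap is big enough */
                    addr = vma->vm_end;             /* skip past this VMA */
            }
            return addr;                            /* free space at the top */
    }
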

Completed in 654 milliseconds
