Lines Matching defs:addr

50 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
217 int (*access)(struct vm_area_struct *vma, unsigned long addr,
231 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
233 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
235 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
240 unsigned long addr);
293 struct page *vmalloc_to_page(const void *addr);
294 unsigned long vmalloc_to_pfn(const void *addr);
305 unsigned long addr = (unsigned long)x;
307 return addr >= VMALLOC_START && addr < VMALLOC_END;
893 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
933 int walk_page_range(unsigned long addr, unsigned long end,
935 void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
945 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
988 extern int make_pages_present(unsigned long addr, unsigned long end);
989 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
990 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1002 struct page *get_dump_page(unsigned long addr);
1018 static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1020 return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1024 unsigned long addr)
1027 (vma->vm_start == addr) &&
1028 !vma_growsdown(vma->vm_prev, addr);
1032 static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1034 return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1038 unsigned long addr)
1041 (vma->vm_end == addr) &&
1042 !vma_growsup(vma->vm_next, addr);
1051 extern unsigned long do_mremap(unsigned long addr,
1145 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1147 static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1151 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1367 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1372 struct vm_area_struct *, unsigned long addr, int new_below);
1378 unsigned long addr, unsigned long len, pgoff_t pgoff);
1392 unsigned long addr, unsigned long len,
1397 extern unsigned long mmap_region(struct file *file, unsigned long addr,
1461 /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
1462 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
1463 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
1503 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
1504 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
1506 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
1507 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1509 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1525 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
1554 int in_gate_area_no_mm(unsigned long addr);
1555 int in_gate_area(struct mm_struct *mm, unsigned long addr);
1557 int in_gate_area_no_mm(unsigned long addr);
1558 #define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
1583 pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
1584 pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
1585 pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
1586 pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
1613 unsigned long addr,
1616 unsigned long addr, struct vm_area_struct *vma,
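The comment at line 1461 spells out find_vma()'s contract: it returns the first VMA satisfying addr < vm_end, which may lie entirely above addr. A minimal sketch of the usual calling pattern, under the assumption of a hypothetical helper my_addr_is_mapped() (the name and the surrounding locking are illustrative, not part of mm.h):

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Illustrative helper: returns 1 if addr falls inside a mapping of mm. */
    static int my_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            int mapped = 0;

            down_read(&mm->mmap_sem);       /* find_vma() expects mmap_sem held */
            vma = find_vma(mm, addr);
            /*
             * find_vma() only guarantees addr < vma->vm_end; the returned VMA
             * may start above addr, so check vm_start before trusting the hit.
             */
            if (vma && vma->vm_start <= addr)
                    mapped = 1;
            up_read(&mm->mmap_sem);

            return mapped;
    }

The vm_start check is the step callers most often forget; find_vma_prev() (line 1462) follows the same convention but also hands back the preceding VMA.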
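PAGE_ALIGN() (line 50) rounds an address or length up to the next page boundary, and is_vmalloc_addr() / vmalloc_to_page() (lines 293-307) translate vmalloc addresses into their backing struct page. A sketch of walking the pages behind a vmalloc'd buffer, assuming a hypothetical buffer buf of len bytes:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Illustrative walk over the pages backing a vmalloc() buffer. */
    static void my_walk_vmalloc_pages(void *buf, size_t len)
    {
            unsigned long addr = (unsigned long)buf & PAGE_MASK;  /* round down */
            unsigned long end  = PAGE_ALIGN((unsigned long)buf + len);  /* round up */

            if (!is_vmalloc_addr(buf))
                    return;                 /* only meaningful for vmalloc space */

            for (; addr < end; addr += PAGE_SIZE) {
                    struct page *page = vmalloc_to_page((void *)addr);
                    /* page is the physical page backing this virtual address */
                    (void)page;
            }
    }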
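access_process_vm() (line 989) copies len bytes between buf and another task's address space and returns the number of bytes actually transferred; it is the primitive behind ptrace peeks and pokes. A minimal read-side sketch, with the helper name my_peek_remote() being an assumption for illustration:

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* Illustrative read of 'len' bytes at 'addr' in task 'tsk' into 'buf'. */
    static int my_peek_remote(struct task_struct *tsk, unsigned long addr,
                              void *buf, int len)
    {
            int copied = access_process_vm(tsk, addr, buf, len, 0 /* read */);

            return copied == len ? 0 : -EFAULT;
    }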
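remap_pfn_range() (line 1504) is the usual way a driver's mmap method maps a contiguous physical range into a userspace VMA. A sketch in the shape of a character-driver mmap handler, assuming a hypothetical my_dev_phys base address filled in elsewhere (e.g. at probe time) and omitting the size/offset validation a real driver would add:

    #include <linux/fs.h>
    #include <linux/mm.h>

    static phys_addr_t my_dev_phys;         /* assumed: set during device setup */

    static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long size = vma->vm_end - vma->vm_start;
            unsigned long pfn  = (my_dev_phys >> PAGE_SHIFT) + vma->vm_pgoff;

            /* Map 'size' bytes of device memory starting at 'pfn' into the VMA. */
            return remap_pfn_range(vma, vma->vm_start, pfn, size,
                                   vma->vm_page_prot);
    }

vm_insert_page() and vm_insert_pfn() (lines 1506-1509) cover the per-page variants of the same job, typically from a fault handler rather than up front in mmap.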