Searched refs:addr (Results 1 - 25 of 42) sorted by relevance

/mm/
mincore.c
23 unsigned long addr, unsigned long end,
38 addr & huge_page_mask(h));
43 addr += PAGE_SIZE;
44 if (addr == end)
47 if (!(addr & ~huge_page_mask(h)))
98 unsigned long addr, unsigned long end,
101 unsigned long nr = (end - addr) >> PAGE_SHIFT;
107 pgoff = linear_page_index(vma, addr);
117 unsigned long addr, unsigned long end,
124 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr,
22 mincore_hugetlb_page_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec) argument
97 mincore_unmapped_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec) argument
116 mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec) argument
159 mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, unsigned char *vec) argument
184 mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned char *vec) argument
202 mincore_page_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec) argument
225 do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec) argument
[all...]
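
The mincore.c snippets above walk a user range and fill a per-page residency vector. Userspace reaches the same information through the mincore(2) syscall; a minimal sketch (standard Linux API; the region size and number of touched pages are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 16 * page;
	size_t pages = len / page, resident = 0;

	/* Map an anonymous region; pages are not resident until touched. */
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) { perror("mmap"); return 1; }

	memset(addr, 0, 4 * page);	/* fault in the first four pages */

	unsigned char *vec = malloc(pages);
	if (!vec || mincore(addr, len, vec) != 0) {
		perror("mincore");
		return 1;
	}

	for (size_t i = 0; i < pages; i++)
		resident += vec[i] & 1;	/* bit 0 = page resident in core */
	printf("%zu of %zu pages resident\n", resident, pages);

	free(vec);
	munmap(addr, len);
	return 0;
}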
pagewalk.c
6 static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, argument
12 pte = pte_offset_map(pmd, addr);
14 err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
17 addr += PAGE_SIZE;
18 if (addr == end)
27 static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, argument
34 pmd = pmd_offset(pud, addr);
37 next = pmd_addr_end(addr, end);
40 err = walk->pte_hole(addr, next, walk);
72 walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
101 hugetlb_entry_end(struct hstate *h, unsigned long addr, unsigned long end) argument
108 walk_hugetlb_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
131 walk_hugetlb_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct mm_walk *walk) argument
167 walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk) argument
[all...]
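
Every level of pagewalk.c uses the same loop shape: compute where the current table entry's coverage ends (pmd_addr_end() clamps against the overall end), act on that span, then advance. A self-contained sketch of the idiom, with a hypothetical 2 MiB SPAN standing in for a PMD's coverage and span_end() standing in for pmd_addr_end():

#include <stdint.h>
#include <stdio.h>

#define SPAN (2UL << 20)	/* hypothetical per-entry coverage, like a PMD */

/* Mirror of pmd_addr_end(): the next SPAN-aligned boundary, clamped
 * so the final iteration stops exactly at 'end'. */
static uintptr_t span_end(uintptr_t addr, uintptr_t end)
{
	uintptr_t boundary = (addr + SPAN) & ~(SPAN - 1);
	return boundary < end ? boundary : end;
}

static void walk_range(uintptr_t addr, uintptr_t end)
{
	uintptr_t next;

	do {
		next = span_end(addr, end);
		printf("visit [%#lx, %#lx)\n",
		       (unsigned long)addr, (unsigned long)next);
		/* a real walker would descend to the next table level here */
	} while (addr = next, addr != end);
}

int main(void)
{
	walk_range(0x1ff000, 0x601000);	/* straddles several 2 MiB spans */
	return 0;
}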
vmalloc.c
58 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end) argument
62 pte = pte_offset_kernel(pmd, addr);
64 pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66 } while (pte++, addr += PAGE_SIZE, addr != end);
69 static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end) argument
74 pmd = pmd_offset(pud, addr);
76 next = pmd_addr_end(addr, end);
79 vunmap_pte_range(pmd, addr, next);
80 } while (pmd++, addr = next, addr != end);
83 vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end) argument
97 vunmap_page_range(unsigned long addr, unsigned long end) argument
112 vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) argument
138 vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) argument
155 vmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t prot, struct page **pages, int *nr) argument
183 unsigned long addr = start; local
217 unsigned long addr = (unsigned long)x; local
229 unsigned long addr = (unsigned long) vmalloc_addr; local
287 __find_vmap_area(unsigned long addr) argument
351 unsigned long addr; local
702 find_vmap_area(unsigned long addr) argument
713 free_unmap_vmap_area_addr(unsigned long addr) argument
788 addr_to_vb_idx(unsigned long addr) argument
909 unsigned long addr = 0; local
963 vb_free(const void *addr, unsigned long size) argument
1064 unsigned long addr = (unsigned long)mem; local
1099 unsigned long addr; local
1167 unsigned long addr; local
1229 map_kernel_range_noflush(unsigned long addr, unsigned long size, pgprot_t prot, struct page **pages) argument
1249 unmap_kernel_range_noflush(unsigned long addr, unsigned long size) argument
1263 unmap_kernel_range(unsigned long addr, unsigned long size) argument
1275 unsigned long addr = (unsigned long)area->addr; local
1391 find_vm_area(const void *addr) argument
1410 remove_vm_area(const void *addr) argument
1432 __vunmap(const void *addr, int deallocate_pages) argument
1487 vfree(const void *addr) argument
1513 vunmap(const void *addr) argument
1637 void *addr; local
1876 aligned_vread(char *buf, char *addr, unsigned long count) argument
1915 aligned_vwrite(char *buf, char *addr, unsigned long count) argument
1978 vread(char *buf, char *addr, unsigned long count) argument
2059 vwrite(char *buf, char *addr, unsigned long count) argument
2178 remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) argument
2196 f(pte_t *pte, pgtable_t table, unsigned long addr, void *data) argument
2322 unsigned long addr; local
2689 unsigned long addr = va->va_start; local
[all...]
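
aligned_vread()/aligned_vwrite() in vmalloc.c copy through a mapped area one page at a time, so each page can be validated (and zero-filled if it is a hole) before being touched. A userspace sketch of that chunk-at-page-boundary loop; page_ok() is a hypothetical stand-in for the kernel's per-page mapping check:

#include <stddef.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for "is this page actually mapped?". */
static int page_ok(const char *addr)
{
	(void)addr;
	return 1;
}

/* Copy 'count' bytes from 'addr', never crossing a page boundary in
 * one step, zero-filling pages that are holes -- the aligned_vread()
 * shape. */
static long vread_sketch(char *buf, const char *addr, unsigned long count)
{
	long copied = 0;

	while (count) {
		unsigned long offset = (unsigned long)addr & (PAGE_SIZE - 1);
		unsigned long n = PAGE_SIZE - offset;	/* rest of this page */

		if (n > count)
			n = count;
		if (page_ok(addr))
			memcpy(buf, addr, n);
		else
			memset(buf, 0, n);	/* hole: read as zeroes */
		buf += n; addr += n;
		copied += n; count -= n;
	}
	return copied;
}

int main(void)
{
	static char src[3 * PAGE_SIZE], dst[sizeof(src)];

	return vread_sketch(dst, src, sizeof(src)) == sizeof(src) ? 0 : 1;
}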
debug-pagealloc.c
26 void *addr = kmap_atomic(page); local
29 memset(addr, PAGE_POISON, PAGE_SIZE);
30 kunmap_atomic(addr);
77 void *addr; local
82 addr = kmap_atomic(page);
83 check_poison_mem(addr, PAGE_SIZE);
85 kunmap_atomic(addr);
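
debug-pagealloc fills freed pages with a poison byte and re-checks the pattern later to catch stray writes into free memory. A userspace sketch of the same poison/verify pair; 0xaa matches the kernel's PAGE_POISON value, everything else is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POISON 0xaa	/* same value as the kernel's PAGE_POISON */
#define SIZE   4096

static void poison(unsigned char *addr)
{
	memset(addr, POISON, SIZE);
}

/* Report the first byte that no longer holds the poison pattern. */
static int check_poison(const unsigned char *addr)
{
	for (size_t i = 0; i < SIZE; i++) {
		if (addr[i] != POISON) {
			fprintf(stderr, "corruption at offset %zu: %#x\n",
				i, addr[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned char *page = malloc(SIZE);

	poison(page);			/* "free" the page */
	page[100] = 0x42;		/* simulated use-after-free write */
	int bad = check_poison(page);	/* caught on "reallocation" */
	free(page);
	return bad ? 1 : 0;
}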
mprotect.c
40 unsigned long addr, int prot_numa, spinlock_t **ptl)
47 return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
55 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
61 unsigned long addr, unsigned long end, pgprot_t newprot,
69 pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
81 ptent = ptep_modify_prot_start(mm, addr, pte);
93 ptep_modify_prot_commit(mm, addr, pte, ptent);
98 page = vm_normal_page(vma, addr, oldpte);
101 ptep_set_numa(mm, addr, pte);
121 set_pte_at(mm, addr, pte, newpte);
39 lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, int prot_numa, spinlock_t **ptl) argument
60 change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
133 change_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
190 change_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
210 change_protection_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa) argument
[all...]
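
change_pte_range() and the levels above it are what service mprotect(2): every PTE in [addr, end) is rewritten with the new protection. From userspace the entry point is simply the syscall; a minimal sketch using the standard POSIX API:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *addr = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) { perror("mmap"); return 1; }

	addr[0] = 'x';	/* writable, fine */

	/* Drop write permission on the first two pages only; the kernel
	 * splits the VMA and rewrites just that PTE range. */
	if (mprotect(addr, 2 * page, PROT_READ) != 0) {
		perror("mprotect");
		return 1;
	}

	addr[2 * page] = 'y';	/* still writable: past the changed range */
	/* addr[0] = 'z';	   would now fault with SIGSEGV */

	munmap(addr, 4 * page);
	return 0;
}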
sparse-vmemmap.c
101 pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node) argument
103 pte_t *pte = pte_offset_kernel(pmd, addr);
110 set_pte_at(&init_mm, addr, pte, entry);
115 pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node) argument
117 pmd_t *pmd = pmd_offset(pud, addr);
127 pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node) argument
129 pud_t *pud = pud_offset(pgd, addr);
139 pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node) argument
141 pgd_t *pgd = pgd_offset_k(addr);
154 unsigned long addr local
[all...]
fremap.c
32 unsigned long addr, pte_t *ptep)
39 flush_cache_page(vma, addr, pte_pfn(pte));
40 pte = ptep_clear_flush(vma, addr, ptep);
41 page = vm_normal_page(vma, addr, pte);
64 pte_clear_not_present_full(mm, addr, ptep, 0);
73 unsigned long addr, unsigned long pgoff, pgprot_t prot)
79 pte = get_locked_pte(mm, addr, &ptl);
86 zap_pte(mm, vma, addr, pte);
88 set_pte_at(mm, addr, pte, pte_file_mksoft_dirty(ptfile));
102 int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr, argument
31 zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) argument
72 install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot) argument
226 unsigned long addr; local
[all...]
vmacache.c
58 void vmacache_update(unsigned long addr, struct vm_area_struct *newvma) argument
61 current->vmacache[VMACACHE_HASH(addr)] = newvma;
84 struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr) argument
100 if (vma->vm_start <= addr && vma->vm_end > addr) {
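
vmacache is a small per-task, direct-mapped cache: a hash of addr picks a slot, and a hit requires vm_start <= addr < vm_end, exactly as in the snippet above. A self-contained sketch of that lookup/update pattern (slot count and hash shift are illustrative):

#include <stdint.h>
#include <stdio.h>

#define CACHE_SLOTS 4
#define HASH_SHIFT  12	/* hash on the page number, like the kernel */

struct area { uintptr_t start, end; };

/* Direct-mapped cache: one slot per hash value, newest entry wins. */
static struct area *cache[CACHE_SLOTS];

static unsigned int slot(uintptr_t addr)
{
	return (addr >> HASH_SHIFT) & (CACHE_SLOTS - 1);
}

static void cache_update(uintptr_t addr, struct area *a)
{
	cache[slot(addr)] = a;
}

static struct area *cache_find(uintptr_t addr)
{
	struct area *a = cache[slot(addr)];

	/* Half-open interval check, as in vmacache_find(). */
	if (a && a->start <= addr && a->end > addr)
		return a;
	return NULL;	/* miss: caller falls back to the full lookup */
}

int main(void)
{
	struct area a = { 0x400000, 0x500000 };

	cache_update(0x401000, &a);
	printf("hit: %d\n", cache_find(0x401234) != NULL);
	printf("hit: %d\n", cache_find(0x700000) != NULL);
	return 0;
}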
memory.c
393 unsigned long addr)
397 pte_free_tlb(tlb, token, addr);
402 unsigned long addr, unsigned long end,
409 start = addr;
410 pmd = pmd_offset(pud, addr);
412 next = pmd_addr_end(addr, end);
415 free_pte_range(tlb, pmd, addr);
416 } while (pmd++, addr = next, addr != end);
435 unsigned long addr, unsigned long end,
392 free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) argument
401 free_pmd_range(struct mmu_gather *tlb, pud_t *pud, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
434 free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
470 free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling) argument
533 unsigned long addr = vma->vm_start; local
646 print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page) argument
750 vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) argument
805 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) argument
888 copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
951 copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
984 copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
1011 unsigned long addr = vma->vm_start; local
1075 zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, struct zap_details *details) argument
1222 zap_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, struct zap_details *details) argument
1266 zap_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, struct zap_details *details) argument
1285 unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details) argument
1456 __get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) argument
1478 insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot) argument
1540 vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) argument
1556 insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t prot) argument
1601 vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) argument
1629 vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) argument
1659 remap_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) argument
1680 remap_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) argument
1701 remap_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long pfn, pgprot_t prot) argument
1731 remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) argument
1836 apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) argument
1870 apply_to_pmd_range(struct mm_struct *mm, pud_t *pud, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) argument
1892 apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) argument
1916 apply_to_page_range(struct mm_struct *mm, unsigned long addr, unsigned long size, pte_fn_t fn, void *data) argument
3092 numa_migrate_prep(struct page *page, struct vm_area_struct *vma, unsigned long addr, int page_nid, int *flags) argument
3107 do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t pte, pte_t *ptep, pmd_t *pmd) argument
3543 generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) argument
3570 __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
3640 access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
3651 access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) argument
3731 clear_gigantic_page(struct page *page, unsigned long addr, unsigned int pages_per_huge_page) argument
3745 clear_huge_page(struct page *page, unsigned long addr, unsigned int pages_per_huge_page) argument
3762 copy_user_gigantic_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) argument
3781 copy_user_huge_page(struct page *dst, struct page *src, unsigned long addr, struct vm_area_struct *vma, unsigned int pages_per_huge_page) argument
[all...]
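
Among the memory.c entries, __access_remote_vm() and access_process_vm() implement reading and writing another process's memory at a given addr (the machinery behind ptrace and /proc/pid/mem). Userspace can reach equivalent functionality through process_vm_readv(2); a minimal sketch that reads the caller's own address space to stay self-contained:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[32] = "hello from the remote side";
	char dst[32] = { 0 };

	struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	/* Read sizeof(src) bytes at address 'src' in pid's address space;
	 * using our own pid keeps the demo self-contained. */
	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) { perror("process_vm_readv"); return 1; }

	printf("read %zd bytes: %s\n", n, dst);
	return 0;
}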
gup.c
657 * @addr: user address
670 struct page *get_dump_page(unsigned long addr) argument
675 if (__get_user_pages(current, current->mm, addr, 1,
679 flush_cache_page(vma, addr, page_to_pfn(page));
723 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, argument
729 ptem = ptep = pte_offset_map(&pmd, addr);
763 } while (ptep++, addr += PAGE_SIZE, addr != end);
782 static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, argument
789 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, argument
836 gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
878 gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
912 gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) argument
944 unsigned long addr, len, end; local
[all...]
early_ioremap.c
159 void __init early_iounmap(void __iomem *addr, unsigned long size) argument
169 if (prev_map[i] == addr) {
176 addr, size))
181 addr, size, slot, prev_size[slot]))
185 addr, size, slot);
187 virt_addr = (unsigned long)addr;
235 void __init early_iounmap(void __iomem *addr, unsigned long size) argument
242 void __init early_memunmap(void *addr, unsigned long size) argument
244 early_iounmap((__force void __iomem *)addr, size);
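
early_iounmap() finds the fixed slot a mapping came from by matching addr against prev_map[] and insists the recorded size matches. A sketch of that slot bookkeeping (slot count and error reporting are illustrative):

#include <stdio.h>

#define NR_SLOTS 4

/* Fixed pool of early mappings, matched by address on unmap. */
static void *prev_map[NR_SLOTS];
static unsigned long prev_size[NR_SLOTS];

static void *slot_map(void *addr, unsigned long size)
{
	for (int i = 0; i < NR_SLOTS; i++) {
		if (!prev_map[i]) {
			prev_map[i] = addr;
			prev_size[i] = size;
			return addr;
		}
	}
	return NULL;	/* all slots busy */
}

static void slot_unmap(void *addr, unsigned long size)
{
	int slot = -1;

	for (int i = 0; i < NR_SLOTS; i++)
		if (prev_map[i] == addr)
			slot = i;

	if (slot < 0) {
		fprintf(stderr, "unmap of unknown addr %p\n", addr);
		return;
	}
	if (prev_size[slot] != size) {	/* size must match the mapping */
		fprintf(stderr, "size mismatch on %p\n", addr);
		return;
	}
	prev_map[slot] = NULL;
}

int main(void)
{
	char buf[64];

	slot_map(buf, sizeof(buf));
	slot_unmap(buf, sizeof(buf));
	slot_unmap(buf, sizeof(buf));	/* reports: unknown addr */
	return 0;
}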
mmap.c
53 #define arch_mmap_check(addr, len, flags) (0)
57 #define arch_rebalance_pgtables(addr, len) (addr)
286 static unsigned long do_brk(unsigned long addr, unsigned long len);
552 static int find_vma_links(struct mm_struct *mm, unsigned long addr, argument
567 if (vma_tmp->vm_end > addr) {
587 unsigned long addr, unsigned long end)
593 vma = find_vma_intersection(mm, addr, end);
598 max(addr, vma->vm_start)) >> PAGE_SHIFT;
1014 * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
586 count_vma_pages_range(struct mm_struct *mm, unsigned long addr, unsigned long end) argument
1042 vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t pgoff, struct mempolicy *policy, const char __user *anon_name) argument
1266 do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate) argument
1466 unsigned long addr; member in struct:mmap_arg_struct
1541 mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff) argument
1922 arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) argument
1964 unsigned long addr = addr0; local
2009 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) argument
2043 find_vma(struct mm_struct *mm, unsigned long addr) argument
2081 find_vma_prev(struct mm_struct *mm, unsigned long addr, struct vm_area_struct **pprev) argument
2321 find_extend_vma(struct mm_struct *mm, unsigned long addr) argument
2350 find_extend_vma(struct mm_struct *mm, unsigned long addr) argument
2452 __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
2520 split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
2650 do_brk(unsigned long addr, unsigned long len) argument
2731 vm_brk(unsigned long addr, unsigned long len) argument
2837 copy_vma(struct vm_area_struct **vmap, unsigned long addr, unsigned long len, pgoff_t pgoff, bool *need_rmap_locks) argument
2988 __install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_operations_struct *ops, void *priv) argument
3036 _install_special_mapping( struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, const struct vm_special_mapping *spec) argument
3045 install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long vm_flags, struct page **pages) argument
[all...]
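
do_mmap_pgoff(), get_unmapped_area() and find_vma() are the kernel half of mmap(2): choose an address, create the VMA, and index it for later lookup by addr. A minimal userspace sketch, including the fact that a non-NULL addr is only a hint unless MAP_FIXED is passed:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* addr == NULL: let get_unmapped_area() pick a free range. */
	void *a = mmap(NULL, 4 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (a == MAP_FAILED) { perror("mmap"); return 1; }
	printf("kernel-chosen addr: %p\n", a);

	/* A non-NULL addr is only a hint unless MAP_FIXED is set; the
	 * kernel may move the mapping if the hint range is busy. */
	void *hint = (void *)((char *)a + 64 * page);
	void *b = mmap(hint, page, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (b == MAP_FAILED) { perror("mmap"); return 1; }
	printf("hint %p -> got %p\n", hint, b);

	munmap(a, 4 * page);
	munmap(b, page);
	return 0;
}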
mremap.c
31 static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr) argument
37 pgd = pgd_offset(mm, addr);
41 pud = pud_offset(pgd, addr);
45 pmd = pmd_offset(pud, addr);
53 unsigned long addr)
59 pgd = pgd_offset(mm, addr);
60 pud = pud_alloc(mm, pgd, addr);
64 pmd = pmd_alloc(mm, pud, addr);
336 static struct vm_area_struct *vma_to_resize(unsigned long addr, argument
340 struct vm_area_struct *vma = find_vma(mm, addr);
52 alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr) argument
395 mremap_to(unsigned long addr, unsigned long old_len, unsigned long new_addr, unsigned long new_len, bool *locked) argument
[all...]
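
get_old_pmd()/alloc_new_pmd() are the page-table half of mremap(2): entries are moved from the old addr to the new one rather than copying the data. A minimal userspace sketch of growing a mapping, letting the kernel relocate it if it cannot grow in place:

#define _GNU_SOURCE	/* for mremap() and MREMAP_MAYMOVE */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	char *addr = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) { perror("mmap"); return 1; }
	strcpy(addr, "survives the move");

	/* Grow to 16 pages; MREMAP_MAYMOVE lets the kernel relocate the
	 * mapping (remapping the PTEs) if it cannot grow in place. */
	char *new_addr = mremap(addr, 2 * page, 16 * page, MREMAP_MAYMOVE);
	if (new_addr == MAP_FAILED) { perror("mremap"); return 1; }

	printf("%p -> %p: %s\n", (void *)addr, (void *)new_addr, new_addr);
	munmap(new_addr, 16 * page);
	return 0;
}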
nommu.c
239 void vfree(const void *addr) argument
241 kfree(addr);
275 struct page *vmalloc_to_page(const void *addr) argument
277 return virt_to_page(addr);
281 unsigned long vmalloc_to_pfn(const void *addr) argument
283 return page_to_pfn(virt_to_page(addr));
287 long vread(char *buf, char *addr, unsigned long count) argument
293 memcpy(buf, addr, count);
297 long vwrite(char *buf, char *addr, unsigned long count) argument
300 if ((unsigned long) addr + count < count)
439 vunmap(const void *addr) argument
496 vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page) argument
835 find_vma(struct mm_struct *mm, unsigned long addr) argument
863 find_extend_vma(struct mm_struct *mm, unsigned long addr) argument
881 find_vma_exact(struct mm_struct *mm, unsigned long addr, unsigned long len) argument
913 validate_mmap_request(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *_capabilities) argument
1261 do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate) argument
1520 unsigned long addr; member in struct:mmap_arg_struct
1546 split_vma(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, int new_below) argument
1729 vm_munmap(unsigned long addr, size_t len) argument
1770 vm_brk(unsigned long addr, unsigned long len) argument
1785 do_mremap(unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, unsigned long new_addr) argument
1841 remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t prot) argument
1862 remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) argument
1877 arch_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) argument
1997 generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr, unsigned long size, pgoff_t pgoff) argument
2005 __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
2047 access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, int write) argument
2057 access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) argument
[all...]
nobootmem.c
39 u64 addr; local
44 addr = memblock_find_in_range_node(size, align, goal, limit, nid);
45 if (!addr)
48 if (memblock_reserve(addr, size))
51 ptr = phys_to_virt(addr);
63 * @addr: starting address of the range
70 void __init free_bootmem_late(unsigned long addr, unsigned long size) argument
74 kmemleak_free_part(__va(addr), size);
76 cursor = PFN_UP(addr);
77 end = PFN_DOWN(addr + size);
216 free_bootmem(unsigned long addr, unsigned long size) argument
[all...]
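
free_bootmem_late() converts the byte range [addr, addr + size) into the page frames wholly contained in it: PFN_UP rounds the start up and PFN_DOWN rounds the end down, so partial pages at either edge are never freed. A sketch of that rounding (a PAGE_SHIFT of 12 is assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* First whole page frame at or after x. */
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
/* Last page-frame boundary at or before x. */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x1800;	/* range starts mid-page */
	unsigned long size = 0x5000;

	unsigned long cursor = PFN_UP(addr);
	unsigned long end = PFN_DOWN(addr + size);

	/* Only frames entirely inside [addr, addr + size) are freed. */
	for (; cursor < end; cursor++)
		printf("free pfn %lu (bytes %#lx..%#lx)\n", cursor,
		       cursor << PAGE_SHIFT,
		       ((cursor + 1) << PAGE_SHIFT) - 1);
	return 0;
}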
percpu-km.c
87 static struct page *pcpu_addr_to_page(void *addr) argument
89 return virt_to_page(addr);
page_cgroup.c
112 void *addr = NULL; local
114 addr = alloc_pages_exact_nid(nid, size, flags);
115 if (addr) {
116 kmemleak_alloc(addr, size, 1, flags);
117 return addr;
121 addr = vzalloc_node(size, nid);
123 addr = vzalloc(size);
125 return addr;
164 static void free_page_cgroup(void *addr) argument
166 if (is_vmalloc_addr(addr)) {
[all...]
percpu-vm.c
133 static void __pcpu_unmap_pages(unsigned long addr, int nr_pages) argument
135 unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
191 static int __pcpu_map_pages(unsigned long addr, struct page **pages, argument
194 return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
210 * reverse lookup (addr -> chunk).
346 chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
357 static struct page *pcpu_addr_to_page(void *addr) argument
359 return vmalloc_to_page(addr);
ksm.c
355 * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
359 * in case the application has unmapped and remapped mm,addr meanwhile.
363 static int break_ksm(struct vm_area_struct *vma, unsigned long addr) argument
370 page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
374 ret = handle_mm_fault(vma->vm_mm, vma, addr,
412 unsigned long addr)
417 vma = find_vma(mm, addr);
418 if (!vma || vma->vm_start > addr)
428 unsigned long addr = rmap_item->address; local
438 vma = find_mergeable_vma(mm, addr);
411 find_mergeable_vma(struct mm_struct *mm, unsigned long addr) argument
461 unsigned long addr = rmap_item->address; local
684 unsigned long addr; local
831 void *addr = kmap_atomic(page); local
859 unsigned long addr; local
936 unsigned long addr; local
1507 get_next_rmap_item(struct mm_slot *mm_slot, struct rmap_item **rmap_list, unsigned long addr) argument
[all...]
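
break_ksm() and find_mergeable_vma() operate on ranges that userspace has opted into merging; the opt-in is madvise(2) with MADV_MERGEABLE. A minimal sketch (actual merging requires CONFIG_KSM and a running ksmd, so the madvise call is best-effort here):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 64 * page;

	char *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED) { perror("mmap"); return 1; }

	/* Fill every page identically so KSM has something to merge. */
	for (size_t off = 0; off < len; off += page)
		memset(addr + off, 0x5a, page);

	/* Register the range with KSM; merging happens asynchronously in
	 * ksmd.  A later write to a merged page goes through break_ksm()
	 * via copy-on-write. */
	if (madvise(addr, len, MADV_MERGEABLE) != 0)
		perror("madvise(MADV_MERGEABLE)");	/* needs CONFIG_KSM */

	addr[0] = 'w';	/* would un-merge (COW) the first page */
	munmap(addr, len);
	return 0;
}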
util.c
254 unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, argument
265 ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
274 unsigned long vm_mmap(struct file *file, unsigned long addr, argument
283 return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
287 void kvfree(const void *addr) argument
289 if (is_vmalloc_addr(addr))
290 vfree(addr);
292 kfree(addr);
mempolicy.c
485 unsigned long addr, unsigned long end,
493 orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
500 page = vm_normal_page(vma, addr, *pte);
517 } while (pte++, addr += PAGE_SIZE, addr != end);
519 return addr != end;
552 unsigned long addr, unsigned long end,
559 pmd = pmd_offset(pud, addr);
561 next = pmd_addr_end(addr, end);
569 split_huge_page_pmd(vma, addr, pmd);
484 queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
551 queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
579 queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
601 queue_pages_pgd_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, const nodemask_t *nodes, unsigned long flags, void *private) argument
631 change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
643 change_prot_numa(struct vm_area_struct *vma, unsigned long addr, unsigned long end) argument
866 lookup_node(struct mm_struct *mm, unsigned long addr) argument
880 do_get_mempolicy(int *policy, nodemask_t *nmask, unsigned long addr, unsigned long flags) argument
1595 __get_vma_policy(struct vm_area_struct *vma, unsigned long addr) argument
1632 get_vma_policy(struct vm_area_struct *vma, unsigned long addr) argument
1808 interleave_nid(struct mempolicy *pol, struct vm_area_struct *vma, unsigned long addr, int shift) argument
1860 huge_zonelist(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol, nodemask_t **nodemask) argument
2013 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, int node) argument
2274 mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr) argument
[all...]
percpu.c
85 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
87 #define __addr_to_pcpu_ptr(addr) \
88 (void __percpu *)((unsigned long)(addr) - \
100 #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
190 static bool pcpu_addr_in_first_chunk(void *addr) argument
194 return addr >= first_start && addr < first_start + pcpu_unit_size;
197 static bool pcpu_addr_in_reserved_chunk(void *addr) argument
201 return addr >= first_start && addr < first_start + pcpu_reserved_chunk_limit;
839 pcpu_chunk_addr_search(void *addr) argument
1233 void *addr; local
1281 is_kernel_percpu_address(unsigned long addr) argument
1322 per_cpu_ptr_to_phys(void *addr) argument
[all...]
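
percpu translates between a generic per-CPU pointer and the concrete address of one CPU's copy with plain offset arithmetic against a base, and the reverse lookup (is this addr inside the percpu block, and at what offset?) mirrors is_kernel_percpu_address(). A userspace sketch with fixed-size units standing in for per-CPU areas (unit count and size are illustrative):

#include <stdio.h>

#define NR_CPUS   4
#define UNIT_SIZE 4096UL

/* One contiguous block holding NR_CPUS copies of the percpu area. */
static char units[NR_CPUS * UNIT_SIZE];

/* Address of cpu's copy of the variable at 'offset' in the area --
 * the same arithmetic as per_cpu_ptr(). */
static void *per_cpu_addr(unsigned long offset, int cpu)
{
	return units + cpu * UNIT_SIZE + offset;
}

/* Reverse lookup: does addr fall inside the percpu block, and if so,
 * at which offset?  Mirrors is_kernel_percpu_address(). */
static int percpu_offset(void *addr, unsigned long *offset)
{
	char *p = addr;

	if (p < units || p >= units + sizeof(units))
		return 0;
	*offset = (unsigned long)(p - units) % UNIT_SIZE;
	return 1;
}

int main(void)
{
	unsigned long off;
	void *a = per_cpu_addr(128, 2);

	if (percpu_offset(a, &off))
		printf("addr %p is percpu offset %lu\n", a, off);
	return 0;
}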
highmem.c
156 unsigned long addr = (unsigned long)vaddr; local
158 if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
159 int i = PKMAP_NR(addr);
163 return virt_to_page(addr);
cma.c
281 phys_addr_t addr = 0; local
290 addr = memblock_alloc_range(size, alignment,
295 if (!addr) {
296 addr = memblock_alloc_range(size, alignment, base,
298 if (!addr) {
304 base = addr;
zsmalloc.c
765 area->vm_addr = area->vm->addr;
772 unsigned long addr = (unsigned long)area->vm_addr; local
774 unmap_kernel_range(addr, PAGE_SIZE * 2);
804 void *addr; local
818 addr = kmap_atomic(pages[0]);
819 memcpy(buf, addr + off, sizes[0]);
820 kunmap_atomic(addr);
821 addr = kmap_atomic(pages[1]);
822 memcpy(buf + sizes[0], addr, sizes[1]);
823 kunmap_atomic(addr);
832 void *addr; local
[all...]
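
The zsmalloc snippet above copies an object that straddles two pages: sizes[0] is the tail of the first page starting at the object's offset, sizes[1] is the remainder taken from the start of the second page. A userspace sketch of that split copy (page size, offsets and buffer sizes are illustrative):

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Copy 'size' bytes that begin at offset 'off' in pages[0] and spill
 * over into pages[1] -- the shape of zsmalloc's mapping copy path. */
static void copy_split_object(char *buf, char *pages[2],
			      unsigned long off, unsigned long size)
{
	unsigned long sizes[2];

	assert(off + size > PAGE_SIZE);	/* object really straddles */
	sizes[0] = PAGE_SIZE - off;	/* tail of the first page */
	sizes[1] = size - sizes[0];	/* head of the second page */

	memcpy(buf, pages[0] + off, sizes[0]);
	memcpy(buf + sizes[0], pages[1], sizes[1]);
}

int main(void)
{
	static char p0[PAGE_SIZE], p1[PAGE_SIZE];
	char *pages[2] = { p0, p1 };
	char buf[512];

	memset(p0 + PAGE_SIZE - 200, 'A', 200);	/* object part 1 */
	memset(p1, 'B', 312);			/* object part 2 */

	copy_split_object(buf, pages, PAGE_SIZE - 200, 512);
	printf("%c...%c\n", buf[0], buf[511]);	/* A...B */
	return 0;
}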
