Searched defs:addr (Results 1 - 25 of 42) sorted by relevance

/mm/

percpu-km.c
    87  static struct page *pcpu_addr_to_page(void *addr)  [argument]
    89  return virt_to_page(addr);

pagewalk.c
     6  static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,  [argument]
    12  pte = pte_offset_map(pmd, addr);
    14  err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
    17  addr += PAGE_SIZE;
    18  if (addr == end)
    27  static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,  [argument]
    34  pmd = pmd_offset(pud, addr);
    37  next = pmd_addr_end(addr, end);
    40  err = walk->pte_hole(addr, next, walk);
    72  walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end, struct mm_walk *walk)  [argument]
   101  hugetlb_entry_end(struct hstate *h, unsigned long addr, unsigned long end)  [argument]
   108  walk_hugetlb_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct mm_walk *walk)  [argument]
   131  walk_hugetlb_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct mm_walk *walk)  [argument]
   167  walk_page_range(unsigned long addr, unsigned long end, struct mm_walk *walk)  [argument]
   [all...]
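
The walk_pte_range()/walk_pmd_range() matches above are the kernel's generic page-table walker: each level clamps its span with pmd_addr_end(addr, end), visits present entries, and reports unmapped spans through the pte_hole callback. That walker is kernel-internal, but /proc/self/pagemap exposes the same per-page present/hole view to userspace; the sketch below is an illustrative analogue, not kernel code.

    /* Userspace analogue of a per-page walk: /proc/self/pagemap holds one
     * 64-bit entry per virtual page; bit 63 set means the page is present. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        size_t len = 4 * psize;
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;
        buf[0] = 1;                     /* fault in the first page only */

        int fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0)
            return 1;

        /* addr += PAGE_SIZE per iteration, as in walk_pte_range() above */
        for (uintptr_t a = (uintptr_t)buf; a < (uintptr_t)buf + len; a += psize) {
            uint64_t ent = 0;
            pread(fd, &ent, sizeof(ent), (a / psize) * sizeof(ent));
            printf("%#lx: %s\n", (unsigned long)a,
                   (ent >> 63) ? "present" : "hole");
        }
        close(fd);
        return 0;
    }

On kernels since 4.0 the PFN bits of pagemap are hidden from unprivileged readers, but the present bit used here remains visible.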

vmacache.c
    58  void vmacache_update(unsigned long addr, struct vm_area_struct *newvma)  [argument]
    61  current->vmacache[VMACACHE_HASH(addr)] = newvma;
    84  struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)  [argument]
   100  if (vma->vm_start <= addr && vma->vm_end > addr) {
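
vmacache_find()/vmacache_update() above form a tiny direct-mapped, per-thread cache in front of the full VMA lookup: the address hashes to one of a few slots, and a hit is only valid while vm_start <= addr < vm_end still holds. A standalone sketch of that lookup pattern; the struct range, RC_SLOTS, and RC_HASH names are illustrative, not the kernel's.

    /* Direct-mapped range cache, mirroring the vmacache lookup shown above. */
    #include <stdio.h>

    #define RC_SLOTS 4
    #define RC_HASH(addr) (((addr) >> 12) % RC_SLOTS)   /* page-granular hash */

    struct range { unsigned long start, end; };         /* [start, end) */
    static struct range *cache[RC_SLOTS];

    static void cache_update(unsigned long addr, struct range *r)
    {
        cache[RC_HASH(addr)] = r;
    }

    static struct range *cache_find(unsigned long addr)
    {
        struct range *r = cache[RC_HASH(addr)];
        /* A slot hit only counts if the cached range still covers addr. */
        if (r && r->start <= addr && r->end > addr)
            return r;
        return NULL;                /* miss: caller falls back to full lookup */
    }

    int main(void)
    {
        static struct range r = { 0x1000, 0x5000 };
        cache_update(0x2000, &r);
        printf("%p %p\n", (void *)cache_find(0x2345),   /* hit */
                          (void *)cache_find(0x9000));  /* miss: NULL */
        return 0;
    }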

debug-pagealloc.c
    26  void *addr = kmap_atomic(page);  [local]
    29  memset(addr, PAGE_POISON, PAGE_SIZE);
    30  kunmap_atomic(addr);
    77  void *addr;  [local]
    82  addr = kmap_atomic(page);
    83  check_poison_mem(addr, PAGE_SIZE);
    85  kunmap_atomic(addr);
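
These debug-pagealloc matches show the poison/verify pair: on free the page is filled with PAGE_POISON through a temporary kmap_atomic() mapping, and on reallocation check_poison_mem() scans for bytes that changed, i.e. a write after free. The same pattern in plain C; the 0xaa poison byte matches the kernel's default PAGE_POISON value but is an assumption of this sketch.

    /* Poison-on-free / verify-on-alloc, as in mm/debug-pagealloc.c. */
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096
    #define POISON    0xaa          /* kernel's PAGE_POISON default */

    static void poison(unsigned char *p)
    {
        memset(p, POISON, PAGE_SIZE);
    }

    static int check_poison(const unsigned char *p)
    {
        for (size_t i = 0; i < PAGE_SIZE; i++)
            if (p[i] != POISON) {
                fprintf(stderr, "corruption at offset %zu: %#x\n", i, p[i]);
                return -1;
            }
        return 0;
    }

    int main(void)
    {
        static unsigned char page[PAGE_SIZE];
        poison(page);
        page[100] = 0;              /* simulate a stray write after free */
        return check_poison(page) ? 1 : 0;
    }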

early_ioremap.c
   159  void __init early_iounmap(void __iomem *addr, unsigned long size)  [argument]
   169  if (prev_map[i] == addr) {
   176  addr, size))
   181  addr, size, slot, prev_size[slot]))
   185  addr, size, slot);
   187  virt_addr = (unsigned long)addr;
   235  void __init early_iounmap(void __iomem *addr, unsigned long size)  [argument]
   242  void __init early_memunmap(void *addr, unsigned long size)  [argument]
   244  early_iounmap((__force void __iomem *)addr, size);

fremap.c
    32  unsigned long addr, pte_t *ptep)
    39  flush_cache_page(vma, addr, pte_pfn(pte));
    40  pte = ptep_clear_flush(vma, addr, ptep);
    41  page = vm_normal_page(vma, addr, pte);
    64  pte_clear_not_present_full(mm, addr, ptep, 0);
    73  unsigned long addr, unsigned long pgoff, pgprot_t prot)
    79  pte = get_locked_pte(mm, addr, &ptl);
    86  zap_pte(mm, vma, addr, pte);
    88  set_pte_at(mm, addr, pte, pte_file_mksoft_dirty(ptfile));
   102  int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,  [argument]
    31  zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)  [argument]
    72  install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot)  [argument]
   226  unsigned long addr;  [local]
   [all...]

mincore.c
    23  unsigned long addr, unsigned long end,
    38  addr & huge_page_mask(h));
    43  addr += PAGE_SIZE;
    44  if (addr == end)
    47  if (!(addr & ~huge_page_mask(h)))
    98  unsigned long addr, unsigned long end,
   101  unsigned long nr = (end - addr) >> PAGE_SHIFT;
   107  pgoff = linear_page_index(vma, addr);
   117  unsigned long addr, unsigned long end,
   124  ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr,
    22  mincore_hugetlb_page_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec)  [argument]
    97  mincore_unmapped_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec)  [argument]
   116  mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned char *vec)  [argument]
   159  mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, unsigned char *vec)  [argument]
   184  mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned char *vec)  [argument]
   202  mincore_page_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, unsigned char *vec)  [argument]
   225  do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)  [argument]
   [all...]
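
mincore.c implements mincore(2): the mincore_*_range() helpers above fill one byte of vec per page in [addr, end) with its residency status. The result is directly observable from userspace:

    /* mincore(2) demo: ask the kernel which pages of a mapping are resident.
     * This is the userspace entry point to do_mincore() above. */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        size_t len = 4 * psize;
        unsigned char vec[4];           /* one status byte per page */

        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;
        buf[2 * psize] = 1;             /* fault in only the third page */

        if (mincore(buf, len, vec) != 0) {
            perror("mincore");
            return 1;
        }
        for (int i = 0; i < 4; i++)
            printf("page %d: %s\n", i,
                   (vec[i] & 1) ? "resident" : "not resident");
        return 0;
    }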

percpu-vm.c
   133  static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)  [argument]
   135  unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
   191  static int __pcpu_map_pages(unsigned long addr, struct page **pages,  [argument]
   194  return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
   210  * reverse lookup (addr -> chunk).
   346  chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
   357  static struct page *pcpu_addr_to_page(void *addr)  [argument]
   359  return vmalloc_to_page(addr);

sparse-vmemmap.c
   101  pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)  [argument]
   103  pte_t *pte = pte_offset_kernel(pmd, addr);
   110  set_pte_at(&init_mm, addr, pte, entry);
   115  pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)  [argument]
   117  pmd_t *pmd = pmd_offset(pud, addr);
   127  pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)  [argument]
   129  pud_t *pud = pud_offset(pgd, addr);
   139  pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)  [argument]
   141  pgd_t *pgd = pgd_offset_k(addr);
   154  unsigned long addr  [local]
   [all...]

cma.c
   281  phys_addr_t addr = 0;  [local]
   290  addr = memblock_alloc_range(size, alignment,
   295  if (!addr) {
   296  addr = memblock_alloc_range(size, alignment, base,
   298  if (!addr) {
   304  base = addr;

gup.c
   657  * @addr: user address
   670  struct page *get_dump_page(unsigned long addr)  [argument]
   675  if (__get_user_pages(current, current->mm, addr, 1,
   679  flush_cache_page(vma, addr, page_to_pfn(page));
   723  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,  [argument]
   729  ptem = ptep = pte_offset_map(&pmd, addr);
   763  } while (ptep++, addr += PAGE_SIZE, addr != end);
   782  static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,  [argument]
   789  static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,  [argument]
   836  gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)  [argument]
   878  gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)  [argument]
   912  gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr)  [argument]
   944  unsigned long addr, len, end;  [local]
   [all...]

highmem.c
   156  unsigned long addr = (unsigned long)vaddr;  [local]
   158  if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
   159  int i = PKMAP_NR(addr);
   163  return virt_to_page(addr);

mprotect.c
    40  unsigned long addr, int prot_numa, spinlock_t **ptl)
    47  return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
    55  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);
    61  unsigned long addr, unsigned long end, pgprot_t newprot,
    69  pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
    81  ptent = ptep_modify_prot_start(mm, addr, pte);
    93  ptep_modify_prot_commit(mm, addr, pte, ptent);
    98  page = vm_normal_page(vma, addr, oldpte);
   101  ptep_set_numa(mm, addr, pte);
   121  set_pte_at(mm, addr, pt
    39  lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, int prot_numa, spinlock_t **ptl)  [argument]
    60  change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa)  [argument]
   133  change_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa)  [argument]
   190  change_pud_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa)  [argument]
   210  change_protection_range(struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, int dirty_accountable, int prot_numa)  [argument]
   [all...]
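
change_pte_range() above is the core of mprotect(2): under the PTE lock it runs ptep_modify_prot_start()/ptep_modify_prot_commit() on each present entry to install the new protections. A userspace demo of the visible effect; the SIGSEGV handler that re-enables access is just a way to observe the fault without crashing.

    /* mprotect(2) demo: drop write permission on a page, catch the fault. */
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static char *page;
    static long psize;

    static void on_segv(int sig)
    {
        (void)sig;
        /* Re-enable writes so the faulting store can retry and complete. */
        mprotect(page, psize, PROT_READ | PROT_WRITE);
        static const char msg[] = "write faulted, permissions restored\n";
        write(STDOUT_FILENO, msg, sizeof msg - 1);
    }

    int main(void)
    {
        psize = sysconf(_SC_PAGESIZE);
        page = mmap(NULL, psize, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        page[0] = 'a';

        signal(SIGSEGV, on_segv);
        mprotect(page, psize, PROT_READ);  /* change_protection_range() path */
        page[0] = 'b';                     /* faults once; handler restores */
        printf("page[0] = %c\n", page[0]);
        return 0;
    }

A returning handler makes the faulting store retry; production code would use sigaction() rather than signal().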

mremap.c
    31  static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)  [argument]
    37  pgd = pgd_offset(mm, addr);
    41  pud = pud_offset(pgd, addr);
    45  pmd = pmd_offset(pud, addr);
    53  unsigned long addr)
    59  pgd = pgd_offset(mm, addr);
    60  pud = pud_alloc(mm, pgd, addr);
    64  pmd = pmd_alloc(mm, pud, addr);
   336  static struct vm_area_struct *vma_to_resize(unsigned long addr,  [argument]
   340  struct vm_area_struct *vma = find_vma(mm, addr);
    52  alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr)  [argument]
   395  mremap_to(unsigned long addr, unsigned long old_len, unsigned long new_addr, unsigned long new_len, bool *locked)  [argument]
   [all...]
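
get_old_pmd()/alloc_new_pmd() above are the two halves of moving page tables for mremap(2): walk pgd -> pud -> pmd at the old address, allocate the same chain at the destination, then move the entries across. From userspace, only the address change is visible:

    /* mremap(2) demo: grow an anonymous mapping, allowing the kernel to
     * move it; mremap_to()/vma_to_resize() above handle the kernel side. */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        char *old = mmap(NULL, psize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (old == MAP_FAILED)
            return 1;
        strcpy(old, "contents follow the move");

        char *new = mremap(old, psize, 16 * psize, MREMAP_MAYMOVE);
        if (new == MAP_FAILED) {
            perror("mremap");
            return 1;
        }
        /* If the mapping moved, 'old' is no longer valid; use 'new' only. */
        printf("old=%p new=%p \"%s\"\n", (void *)old, (void *)new, new);
        munmap(new, 16 * psize);
        return 0;
    }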

nobootmem.c
    39  u64 addr;  [local]
    44  addr = memblock_find_in_range_node(size, align, goal, limit, nid);
    45  if (!addr)
    48  if (memblock_reserve(addr, size))
    51  ptr = phys_to_virt(addr);
    63  * @addr: starting address of the range
    70  void __init free_bootmem_late(unsigned long addr, unsigned long size)  [argument]
    74  kmemleak_free_part(__va(addr), size);
    76  cursor = PFN_UP(addr);
    77  end = PFN_DOWN(addr + size);
   216  free_bootmem(unsigned long addr, unsigned long size)  [argument]
   [all...]

process_vm_access.c
    67  * @addr: start memory address of target process
    77  static int process_vm_rw_single_vec(unsigned long addr,  [argument]
    85  unsigned long pa = addr & PAGE_MASK;
    86  unsigned long start_offset = addr - pa;
    95  nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
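
Line 95 above is the standard count of pages spanned by a byte range. The file implements process_vm_readv(2)/process_vm_writev(2); a minimal self-read shows the interface (reading one's own pid is simply the easiest legal target):

    /* process_vm_readv(2) demo, the syscall served by
     * process_vm_rw_single_vec() above. Pages spanned by [addr, addr+len),
     * per line 95: (addr + len - 1)/PAGE_SIZE - addr/PAGE_SIZE + 1 */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(void)
    {
        char src[] = "copied across address spaces";
        char dst[sizeof(src)];

        struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
        struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

        ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
        if (n < 0) {
            perror("process_vm_readv");
            return 1;
        }
        printf("read %zd bytes: \"%s\"\n", n, dst);
        return 0;
    }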

swap_state.c
   305  struct vm_area_struct *vma, unsigned long addr)
   325  new_page = alloc_page_vma(gfp_mask, vma, addr);
   443  * @addr: target address for mempolicy
   445  * Returns the struct page for entry and addr, after queueing swapin.
   458  struct vm_area_struct *vma, unsigned long addr)
   481  gfp_mask, vma, addr);
   492  return read_swap_cache_async(entry, gfp_mask, vma, addr);
   304  read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr)  [argument]
   457  swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr)  [argument]

util.c
   254  unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,  [argument]
   265  ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
   274  unsigned long vm_mmap(struct file *file, unsigned long addr,  [argument]
   283  return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
   287  void kvfree(const void *addr)  [argument]
   289  if (is_vmalloc_addr(addr))
   290  vfree(addr);
   292  kfree(addr);

iov_iter.c
   453  unsigned long addr;  [local]
   462  addr = (unsigned long)iov->iov_base + offset;
   463  len += *start = addr & (PAGE_SIZE - 1);
   466  addr &= ~(PAGE_SIZE - 1);
   468  res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
   481  unsigned long addr;  [local]
   491  addr = (unsigned long)iov->iov_base + offset;
   492  len += *start = addr & (PAGE_SIZE - 1);
   493  addr &= ~(PAGE_SIZE - 1);
   502  res = get_user_pages_fast(addr,
   520  unsigned long addr = (unsigned long)iov->iov_base + offset;  [local]
   553  char *addr = kmap_atomic(page);  [local]
   859  copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)  [argument]
   868  copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)  [argument]
   [all...]
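
The iov_iter matches repeat one idiom before pinning pages with get_user_pages_fast(): addr & (PAGE_SIZE - 1) is the offset within the first page and addr & ~(PAGE_SIZE - 1) the page-aligned base, valid because PAGE_SIZE is a power of two. A checkable sketch of that arithmetic:

    /* Page-alignment arithmetic as used in iov_iter.c: offset within the
     * first page, page-aligned base, and page count for a byte range. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL        /* must be a power of two */

    int main(void)
    {
        unsigned long addr = 0x12345, len = 10000;

        unsigned long start = addr & (PAGE_SIZE - 1);   /* in-page offset */
        unsigned long base  = addr & ~(PAGE_SIZE - 1);  /* page base */
        unsigned long pages = (start + len + PAGE_SIZE - 1) / PAGE_SIZE;

        assert(base + start == addr);
        printf("base=%#lx offset=%lu pages=%lu\n", base, start, pages);
        return 0;
    }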

page_cgroup.c
   112  void *addr = NULL;  [local]
   114  addr = alloc_pages_exact_nid(nid, size, flags);
   115  if (addr) {
   116  kmemleak_alloc(addr, size, 1, flags);
   117  return addr;
   121  addr = vzalloc_node(size, nid);
   123  addr = vzalloc(size);
   125  return addr;
   164  static void free_page_cgroup(void *addr)  [argument]
   166  if (is_vmalloc_addr(addr)) {
   [all...]

bootmem.c
   150  * @addr: starting physical address of the range
   407  * @addr: starting physical address of the range
   450  * @addr: starting address of the range
   458  int __init reserve_bootmem(unsigned long addr, unsigned long size,  [argument]
   463  start = PFN_DOWN(addr);
   464  end = PFN_UP(addr + size);
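
reserve_bootmem() converts a byte range to page frames conservatively: PFN_DOWN(addr) rounds the start down and PFN_UP(addr + size) rounds the end up, so every partially covered page is reserved. The two macros in isolation (PAGE_SHIFT of 12 is assumed here):

    /* PFN_UP / PFN_DOWN rounding, as used by reserve_bootmem() above. */
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PAGE_SIZE   (1UL << PAGE_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long addr = 0x1800, size = 0x2000;  /* straddles pages */
        /* Prints pfns 1 and 4: pages 1..3 are covered, since the range
         * touches parts of pages 1 and 3. */
        printf("start pfn %lu, end pfn %lu\n",
               PFN_DOWN(addr), PFN_UP(addr + size));
        return 0;
    }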

filemap.c
  2000  unsigned long addr;  [local]
  2047  addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
  2048  do_set_pte(vma, addr, page, pte, false, false);

memblock.c
   280  phys_addr_t *addr)
   285  *addr = __pa(memblock.reserved.regions);
   292  phys_addr_t *addr)
   297  *addr = __pa(memblock.memory.regions);
   326  phys_addr_t old_size, new_size, addr;  [local]
   365  addr = new_array ? __pa(new_array) : 0;
   371  addr = memblock_find_in_range(new_area_start + new_area_size,
   374  if (!addr && new_area_size)
   375  addr = memblock_find_in_range(0,
   379  new_array = addr
   279  get_allocated_memblock_reserved_regions_info(phys_addr_t *addr)  [argument]
   291  get_allocated_memblock_memory_regions_info(phys_addr_t *addr)  [argument]
  1376  memblock_search(struct memblock_type *type, phys_addr_t addr)  [argument]
  1394  memblock_is_reserved(phys_addr_t addr)  [argument]
  1399  memblock_is_memory(phys_addr_t addr)  [argument]
   [all...]
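
memblock_search() (line 1376) backs memblock_is_memory()/memblock_is_reserved(): a binary search over the sorted, non-overlapping region array for the region containing addr. The pattern standalone, with an illustrative struct region in place of the kernel's types:

    /* Binary search over sorted, non-overlapping [base, base+size) regions,
     * mirroring memblock_search(); returns the index or -1. */
    #include <stdio.h>

    struct region { unsigned long base, size; };

    static int region_search(const struct region *r, int cnt, unsigned long addr)
    {
        int lo = 0, hi = cnt;

        while (lo < hi) {
            int mid = lo + (hi - lo) / 2;
            if (addr < r[mid].base)
                hi = mid;
            else if (addr >= r[mid].base + r[mid].size)
                lo = mid + 1;
            else
                return mid;         /* base <= addr < base + size */
        }
        return -1;
    }

    int main(void)
    {
        static const struct region mem[] = {
            { 0x1000, 0x1000 }, { 0x4000, 0x2000 }, { 0x8000, 0x1000 },
        };
        printf("%d %d\n", region_search(mem, 3, 0x4800),   /* 1 */
                          region_search(mem, 3, 0x3000));  /* -1 */
        return 0;
    }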

memory-failure.c
   190  static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,  [argument]
   201  si.si_addr = (void *)addr;
   287  unsigned long addr;  [member in struct:to_kill]
   319  tk->addr = page_address_in_vma(p, vma);
   328  if (tk->addr == -EFAULT) {
   372  else if (kill_proc(tk->tsk, tk->addr, trapno,

migrate.c
   106  unsigned long addr, void *old)
   115  ptep = huge_pte_offset(mm, addr);
   120  pmd = mm_find_pmd(mm, addr);
   124  ptep = pte_offset_map(pmd, addr);
   161  set_pte_at(mm, addr, ptep, pte);
   165  hugepage_add_anon_rmap(new, vma, addr);
   169  page_add_anon_rmap(new, vma, addr);
   174  update_mmu_cache(vma, addr, ptep);
   200  unsigned long addr;  [local]
   205  addr
   105  remove_migration_pte(struct page *new, struct vm_area_struct *vma, unsigned long addr, void *old)  [argument]
  1179  unsigned long addr;  [member in struct:page_to_node]
  1394  unsigned long addr = (unsigned long)(*pages);  [local]
   [all...]

Completed in 252 milliseconds
