/mm/: cross-reference hits for the identifier "address"
pgtable-generic.c
    47: ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty)
        53:     set_pte_at(vma->vm_mm, address, ptep, entry);
        54:     flush_tlb_fix_spurious_fault(vma, address);
    61: pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty)
        67:     VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        69:     set_pmd_at(vma->vm_mm, address, pmdp, entry);
        70:     flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
    81: ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
        85:     young = ptep_test_and_clear_young(vma, address, ptep);
    93: pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp)
   110: ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
   124: pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp)
   138: pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp)
   192: pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp)

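The fragments at lines 53-54 are the body of the generic fallback; a hedged reconstruction of the whole helper (the pte_same() guard is inferred from the "changed" contract of this API, not visible in the hits):

    int ptep_set_access_flags(struct vm_area_struct *vma,
                              unsigned long address, pte_t *ptep,
                              pte_t entry, int dirty)
    {
            int changed = !pte_same(*ptep, entry);      /* anything to do? */

            if (changed) {
                    set_pte_at(vma->vm_mm, address, ptep, entry);
                    flush_tlb_fix_spurious_fault(vma, address);
            }
            return changed;                             /* 1 if the PTE was updated */
    }

The pmdp_set_access_flags() variant at line 61 follows the same shape but asserts HPAGE_PMD_MASK alignment and flushes the whole huge-page range.
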
rmap.c
   517: /* At what user virtual address is page expected in @vma? */
        529:     unsigned long address = __vma_address(page, vma);
        532:     VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
        534:     return address;
   538: /* At what user virtual address is page expected in vma? */
        543:     unsigned long address;
        559:     address = __vma_address(page, vma);
        560:     if (unlikely(address < vma->vm_start || address >= vma->vm_end))
   565: mm_find_pmd(struct mm_struct *mm, unsigned long address)
   602: __page_check_address(struct page *page, struct mm_struct *mm, unsigned long address, spinlock_t **ptlp, int sync)
   652:     unsigned long address;            (local)
   676: page_referenced_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg)
   818: page_mkclean_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg)
   896: page_move_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
   916: __page_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive)
   945: __page_check_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
   977: page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
   988: do_page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive)
  1026: page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
  1138: try_to_unmap_one(struct page *page, struct vm_area_struct *vma, unsigned long address, void *arg)
  1314:     unsigned long address;            (local)
  1648:     unsigned long address = vma_address(page, vma);
  1695:     unsigned long address = vma_address(page, vma);
  1736: __hugepage_set_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address, int exclusive)
  1753: hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
  1767: hugepage_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)

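Both helpers above lean on __vma_address(), which is not itself in the hit list; a minimal sketch, assuming the usual linear file-offset mapping:

    static inline unsigned long
    __vma_address(struct page *page, struct vm_area_struct *vma)
    {
            pgoff_t pgoff = page_to_pgoff(page);    /* page's index in the file */

            /* distance of the page's offset from the vma's starting offset,
             * rescaled to bytes and added to the vma's base address */
            return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    }

The two callers differ only in what they do when the result falls outside [vm_start, vm_end): the first treats it as a bug (line 532), the second returns -EFAULT (line 560).
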
gup.c
    35: follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags)
        47:     ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        64:     migration_entry_wait(mm, pmd, address);
        74:     page = vm_normal_page(vma, address, pte);
   131: /* follow_page_mask - look up a page descriptor from a user-virtual address
   132:  * @vma: vm_area_struct mapping @address
   133:  * @address: virtual address to look up */
   143: follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask)
        156:     page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
   221: get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page)
   270: faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking)
   553: fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags)

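The three hits inside follow_page_pte() (lines 47, 64, 74) outline the PTE-level lookup; a hedged skeleton of that flow, with error paths and permission checks elided:

    static struct page *follow_page_pte_sketch(struct vm_area_struct *vma,
                                               unsigned long address, pmd_t *pmd)
    {
            struct mm_struct *mm = vma->vm_mm;
            struct page *page;
            spinlock_t *ptl;
            pte_t *ptep, pte;

            ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
            pte = *ptep;
            if (!pte_present(pte)) {
                    pte_unmap_unlock(ptep, ptl);
                    /* a migration entry means the page is in flight:
                     * wait for it rather than failing the lookup */
                    if (is_swap_pte(pte) &&
                        is_migration_entry(pte_to_swp_entry(pte)))
                            migration_entry_wait(mm, pmd, address);
                    return NULL;
            }
            page = vm_normal_page(vma, address, pte);   /* pte -> struct page */
            pte_unmap_unlock(ptep, ptl);
            return page;
    }
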
memory.c
    99: /* Randomize the address space (stacks, mmaps, brk, etc.). */
   214: /* ... users and we're going to destroy the full address space (exit/execve). */
   485: /* ... of the address space and the top of it (using -1 for the ... */
   488: /* ... the address space, but end 0 and ceiling 0 refer to the top ... */
   563: __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, unsigned long address)
        567:     pgtable_t new = pte_alloc_one(mm, address);
   603: __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
        605:     pte_t *new = pte_alloc_one_kernel(&init_mm, address);
   728: /* This restricts such mappings to be a linear translation from virtual address ... */
  1354: /* @tlb: address o... (truncated in the hit) */
  1417: zap_page_range_single(struct vm_area_struct *vma, unsigned long address, unsigned long size, struct zap_details *details)
  1445: zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, unsigned long size)
  1996: do_page_mkwrite(struct vm_area_struct *vma, struct page *page, unsigned long address)
  2411: do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte)
  2594: check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
  2628: do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags)
  2704: __do_fault(struct vm_area_struct *vma, unsigned long address, pgoff_t pgoff, unsigned int flags, struct page **page)
  2750: do_set_pte(struct vm_area_struct *vma, unsigned long address, struct page *page, pte_t *pte, bool write, bool anon)
  2838: do_fault_around(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pgoff_t pgoff, unsigned int flags)
  2881: do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
  2922: do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
  2971: do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
  3035: do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte)
  3063: do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags, pte_t orig_pte)
  3198: handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags)
  3260: __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags)
  3346: handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags)
  3388: __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  3411: __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
  3436: __follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp)
  3474: follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp)
  3495: follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn)
  3515: follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys)

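The do_*_fault() family above is dispatched from __handle_mm_fault(), which walks the table hierarchy down to the PTE; a hedged sketch of that walk (the real function also handles hugetlb and transparent-huge-page cases before reaching the PTE level):

    static int handle_fault_sketch(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   unsigned long address, unsigned int flags)
    {
            pgd_t *pgd = pgd_offset(mm, address);   /* top level always exists */
            pud_t *pud;
            pmd_t *pmd;
            pte_t *pte;

            pud = pud_alloc(mm, pgd, address);      /* allocate on the way down */
            if (!pud)
                    return VM_FAULT_OOM;
            pmd = pmd_alloc(mm, pud, address);
            if (!pmd)
                    return VM_FAULT_OOM;
            if (unlikely(pmd_none(*pmd)) &&
                unlikely(__pte_alloc(mm, vma, pmd, address)))
                    return VM_FAULT_OOM;
            pte = pte_offset_map(pmd, address);
            return handle_pte_fault(mm, vma, address, pte, pmd, flags);
    }
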
huge_memory.c
    92: /* @address: the next address inside that to be scanned */
    99:     unsigned long address;            (member of struct khugepaged_scan)
   795: do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags)
        800:     unsigned long haddr = address & HPAGE_PMD_MASK;
   924: huge_pmd_set_accessed(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, int dirty)
        939:     haddr = address & HPAGE_PMD_MASK;
        941:     update_mmu_cache_pmd(vma, address, pmd);
   978: do_huge_pmd_wp_page_fallback(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd, struct page *page, unsigned long haddr)
       1004:     ... vma, address, page_to_nid(page) ...
  1087: do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
  1577: page_check_address_pmd(struct page *page, struct mm_struct *mm, unsigned long address, enum page_check_address_pmd_flag flag, spinlock_t **ptl)
  1623: __split_huge_page_splitting(struct page *page, struct vm_area_struct *vma, unsigned long address)
  1775: __split_huge_page_map(struct page *page, struct vm_area_struct *vma, unsigned long address)
  2144: __collapse_huge_page_isolate(struct vm_area_struct *vma, unsigned long address, pte_t *pte)
  2206: __collapse_huge_page_copy(pte_t *pte, struct page *page, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl)
  2321, 2390: khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int node)
  2414: collapse_huge_page(struct mm_struct *mm, unsigned long address, struct page **hpage, struct vm_area_struct *vma, int node)
  2545: khugepaged_scan_pmd(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, struct page **hpage)
  2857: __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
  2904: split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, pmd_t *pmd)
  2914: split_huge_page_address(struct mm_struct *mm, unsigned long address)

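split_huge_page_address() at line 2914 ties several of these hits together; a hedged sketch built from helpers that appear elsewhere in this hit list, not the actual implementation:

    static void split_huge_page_address_sketch(struct mm_struct *mm,
                                               unsigned long address)
    {
            pmd_t *pmd;

            VM_BUG_ON(address & ~HPAGE_PMD_MASK);   /* caller passes haddr */

            pmd = mm_find_pmd(mm, address);         /* internal.h:106 */
            if (!pmd)
                    return;
            /* split one huge pmd covering @address */
            split_huge_page_pmd_mm(mm, address, pmd);
    }

The haddr = address & HPAGE_PMD_MASK rounding seen at lines 800 and 939 is the same alignment invariant the VM_BUG_ON asserts.
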
mmu_notifier.c
   106: /* ... unmap the address and return 1 or 0 depending if the mapping previously ... */
   126: __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address)
        135:     young = mn->ops->test_young(mn, mm, address);
   145: __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte)
        154:     mn->ops->change_pte(mn, mm, address, pte);
   159: __mmu_notifier_invalidate_page(struct mm_struct *mm, unsigned long address)
        168:     mn->ops->invalidate_page(mn, mm, address);

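These __mmu_notifier_*() walkers iterate every registered notifier and invoke the matching op; a minimal consumer sketch, assuming the standard registration API of this era (my_invalidate_page, my_ops, my_mn, and my_attach are illustrative names):

    static void my_invalidate_page(struct mmu_notifier *mn,
                                   struct mm_struct *mm,
                                   unsigned long address)
    {
            /* drop any secondary (e.g. device) mapping of @address here */
    }

    static const struct mmu_notifier_ops my_ops = {
            .invalidate_page = my_invalidate_page,
    };

    static struct mmu_notifier my_mn = { .ops = &my_ops };

    int my_attach(struct mm_struct *mm)
    {
            /* takes mmap_sem internally; call from process context */
            return mmu_notifier_register(&my_mn, mm);
    }
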
ksm.c
   112: /* @address: the next address inside that to be scanned */
   120:     unsigned long address;            (member of struct ksm_scan)
   152: /* @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
   155:  * @address: the virtual address this rmap_item tracks (+ flags in low bits)
   156:  * @oldchecksum: previous checksum of the page at that virtual address */
   170:     unsigned long address;  /* + low bits used for flags below */   (member of struct rmap_item)
   428:     unsigned long addr = rmap_item->address;
   461:     unsigned long addr = rmap_item->address;
  1862: ksm_might_need_to_copy(struct page *page, struct vm_area_struct *vma, unsigned long address)

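The "(+ flags in low bits)" note at line 155 works because the tracked addresses are page-aligned, leaving the low PAGE_SHIFT bits free; a sketch of the packing trick with illustrative (not KSM's actual) flag values:

    /* hypothetical flag values, for illustration only */
    #define ITEM_UNSTABLE_FLAG      0x100UL
    #define ITEM_STABLE_FLAG        0x200UL

    static inline unsigned long item_address(unsigned long packed)
    {
            return packed & PAGE_MASK;      /* strip the flag bits */
    }

    static inline int item_is_stable(unsigned long packed)
    {
            return !!(packed & ITEM_STABLE_FLAG);
    }
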
hugetlb.c
   323: /* Convert the address within this vma to the page offset within ... */
   326: vma_hugecache_offset(struct hstate *h, struct vm_area_struct *vma, unsigned long address)
        329:     return ((address - vma->vm_start) >> huge_page_shift(h)) + ...
   333: linear_hugepage_index(struct vm_area_struct *vma, unsigned long address)
        336:     return vma_hugecache_offset(hstate_vma(vma), vma, address);
   487: /* This address is already reserved by other process (chg == 0) ... */
   552: dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve, long chg)
        580:     zonelist = huge_zonelist(vma, address, ...
  1183: /* ... address of error page. */
  2509: set_huge_ptep_writable(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
  2625:     unsigned long address;            (local)
  2753: unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, unsigned long address)
  2801: hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t pte, struct page *pagecache_page, spinlock_t *ptl)
  2921: hugetlbfs_pagecache_page(struct hstate *h, struct vm_area_struct *vma, unsigned long address)
  2937: hugetlbfs_pagecache_present(struct hstate *h, struct vm_area_struct *vma, unsigned long address)
  2953: hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address, pte_t *ptep, unsigned int flags)
  3085, 3110: fault_mutex_hash(struct hstate *h, struct mm_struct *mm, struct vm_area_struct *vma, struct address_space *mapping, pgoff_t idx, unsigned long address)
  3119: hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags)
  3335: hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot)
  3657: follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
  3669, 3684: follow_huge_pud(struct mm_struct *mm, unsigned long address, pud_t *pud, int write)

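The return at line 329 is cut off mid-expression; the completed arithmetic, reconstructed under the usual hugetlbfs convention that vm_pgoff is stored in base-page units:

    static pgoff_t vma_hugecache_offset(struct hstate *h,
                                        struct vm_area_struct *vma,
                                        unsigned long address)
    {
            /* how many huge pages into the vma the address falls ... */
            return ((address - vma->vm_start) >> huge_page_shift(h)) +
            /* ... plus the vma's file offset, rescaled to huge pages */
                    (vma->vm_pgoff >> huge_page_order(h));
    }

linear_hugepage_index() at line 333 is then just this computation with the vma's own hstate filled in.
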
filemap_xip.c
   111:     /* address based flush */ ;
   170:     unsigned long address;            (local)
   188:     address = vma->vm_start + ...
   190:     BUG_ON(address < vma->vm_start || address >= vma->vm_end);
   191:     pte = page_check_address(page, mm, address, &ptl, 1);
   194:     flush_cache_page(vma, address, pte_pfn(*pte));
   195:     pteval = ptep_clear_flush(vma, address, pte);
   201:     mmu_notifier_invalidate_page(mm, address);

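Lines 188-201 are the classic "zap one mapping" sequence; a hedged reconstruction (the address arithmetic at 188 and the rmap/counter updates between the flush and the notifier call are assumptions based on the standard unmap pattern):

    static void xip_zap_one_sketch(struct page *page,
                                   struct vm_area_struct *vma, pgoff_t pgoff)
    {
            struct mm_struct *mm = vma->vm_mm;
            unsigned long address;
            spinlock_t *ptl;
            pte_t *pte, pteval;

            address = vma->vm_start +
                      ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
            BUG_ON(address < vma->vm_start || address >= vma->vm_end);
            pte = page_check_address(page, mm, address, &ptl, 1);
            if (!pte)
                    return;
            flush_cache_page(vma, address, pte_pfn(*pte));
            pteval = ptep_clear_flush(vma, address, pte);   /* clear + TLB flush */
            page_remove_rmap(page);                         /* assumed step */
            dec_mm_counter(mm, MM_FILEPAGES);               /* assumed step */
            pte_unmap_unlock(pte, ptl);
            mmu_notifier_invalidate_page(mm, address);      /* tell secondary MMUs */
    }
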
mmap.c
   804: /* ... throughout; but we cannot insert into address ... */
   969: /* ... in front of (at a lower virtual address and file offset than) the vma. */
   993: /* ... beyond (at a higher virtual address and file offset than) the vma. */
  1305: /* Obtain the address to map to. we verify (or select) it and ensure
  1306:  * that it represents a valid section of the address space. */
  1550: /* Check against address space limit. */
  1596: /* ... specific mapper. the address has already been validated, but ... */
  1803: /* Adjust gap address to the desired alignment */
  1900: /* Compute highest gap address at the desired alignment */
  1909: /* Get an address range ... */
  2155: expand_upwards(struct vm_area_struct *vma, unsigned long address)
  2230: expand_downwards(struct vm_area_struct *vma, unsigned long address)
  2307, 2336: expand_stack(struct vm_area_struct *vma, unsigned long address)

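expand_stack() being defined twice (lines 2307 and 2336) suggests the two definitions are selected by stack-growth direction; a hedged sketch of that dispatch, assuming CONFIG_STACK_GROWSUP is the selector:

    int expand_stack(struct vm_area_struct *vma, unsigned long address)
    {
    #ifdef CONFIG_STACK_GROWSUP
            /* stack grows toward higher addresses (e.g. PA-RISC) */
            return expand_upwards(vma, address);
    #else
            /* the common case: grow the vma downward to cover @address */
            return expand_downwards(vma, address);
    #endif
    }
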
migrate.c
   267: migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
        271:     pte_t *ptep = pte_offset_map(pmd, address);
   687: /* No write method for the address space */
  1208: /* ... field must be set to the virtual address of the page to be moved */
  1297: /* Migrate an array of page addresses onto an array of nodes and fill ... */
  1460: /* Move a list of pages in the address space of the currently executing ... */
  1768: migrate_misplaced_transhuge_page(struct mm_struct *mm, struct vm_area_struct *vma, pmd_t *pmd, pmd_t entry, unsigned long address, struct page *page, int node)
        1779:     unsigned long mmun_start = address & HPAGE_PMD_MASK;
        1860:     update_mmu_cache_pmd(vma, address, &entry);
        1865:     update_mmu_cache_pmd(vma, address, ...

internal.h
   106: extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);

nommu.c
   193: /* get a list of pages in an address range belonging to the specified process */
   217: /* follow_pfn - look up PFN at a user virtual address
   219:  * @address: user virtual address */
   226: follow_pfn(struct vm_area_struct *vma, unsigned long address, unsigned long *pfn)
        232:     *pfn = address >> PAGE_SHIFT;
   249: /* ... returns only a logical address. */
   472: /* alloc_vm_area - allocate a range of kernel address space ...
   477:  * This function reserves a range of kernel address space, and
   479:  * are created. If the kernel address space ... */
   872: expand_stack(struct vm_area_struct *vma, unsigned long address)
  1833: follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned int *page_mask)

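On !MMU the translation at line 232 is the whole story, since virtual and physical addresses coincide; a reconstruction of follow_pfn() around it (the VM_IO|VM_PFNMAP guard is an assumption, mirroring the MMU version's contract):

    int follow_pfn(struct vm_area_struct *vma, unsigned long address,
                   unsigned long *pfn)
    {
            if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                    return -EINVAL;

            *pfn = address >> PAGE_SHIFT;   /* identity mapping on nommu */
            return 0;
    }
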
mempolicy.c
  1014: /* ... need migration. Between passing in the full user address ... */
  1146: /* ... is in virtual address order. */
  1151:     unsigned long uninitialized_var(address);
  1155:     address = page_address_in_vma(page, vma);
  1156:     if (address != -EFAULT)
  1163:     return alloc_huge_page_noerr(vma, address, 1);
  1168:     return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
  1227: /* ... on discontinuous address spaces is okay after all */
  1623: /* @addr: address in @vma for shared policy lookup */
  1625: /* Returns effective policy for a VMA at specified address ... */

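Lines 1151-1168 are the migration-target allocator; a hedged sketch of the flow (the function name and the simplified single-vma handling are mine; the real helper walks the vma list until the page's address resolves):

    static struct page *new_page_sketch(struct page *page,
                                        struct vm_area_struct *vma)
    {
            unsigned long address = page_address_in_vma(page, vma);

            if (address == -EFAULT)         /* page is not mapped by this vma */
                    return NULL;
            if (PageHuge(page))
                    return alloc_huge_page_noerr(vma, address, 1);
            /* allocate the replacement where the vma's policy prefers */
            return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
    }
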
filemap.c
   259: /* @mapping: address space structure to write */
   324: /* @mapping: address space structure to wait for
   328:  * Walk the list of under-writeback pages of the given address space */
   375: /* @mapping: address space structure to wait for
   377:  * Walk the list of under-writeback pages of the given address space */
  1999:     unsigned long address = (unsigned long)vmf->virtual_address;
  2047:     addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
  2465: /* Copies from kernel address space cannot fail (NFSD is a big user). */
  2489: /* ... to check that the address is actually valid, when atomic ... */

slab.c
   351: /* cachep->size - 1 * BYTES_PER_WORD: last caller address */
  2568: /* ... virtual address for kfree, ksize, and slab debugging. */
  4099: show_symbol(struct seq_file *m, unsigned long address)
        4105:     if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
        4112:     seq_printf(m, "%p", (void *)address);

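show_symbol() is almost fully visible in the hits; a reconstruction (the symbol-formatting branch between lines 4105 and 4112 is inferred from the standard kallsyms output style):

    static void show_symbol(struct seq_file *m, unsigned long address)
    {
    #ifdef CONFIG_KALLSYMS
            unsigned long offset, size;
            char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];

            if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
                    seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
                    if (modname[0])
                            seq_printf(m, " [%s]", modname);
                    return;
            }
    #endif
            /* fall back to a raw pointer when no symbol matches */
            seq_printf(m, "%p", (void *)address);
    }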