/mm/

fremap.c
   28  pte_t pte = *ptep;  [local]
   30  if (pte_present(pte)) {
   33  flush_cache_page(vma, addr, pte_pfn(pte));
   34  pte = ptep_clear_flush(vma, addr, ptep);
   35  page = vm_normal_page(vma, addr, pte);
   37  if (pte_dirty(pte))
   45  if (!pte_file(pte))
   46  free_swap_and_cache(pte_to_swp_entry(pte));
   52  * Install a file pte to a given virtual memory address, release any
   59  pte_t *pte;  [local]
  [all...]

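The fremap.c hits are the teardown half of nonlinear mappings: a pte being zapped is in exactly one of three states (present, file, or swap), and each needs different cleanup. Below is a minimal sketch of that three-way zap, reconstructed from the lines above in the style of fremap.c's zap helper; like all the sketches in this listing it is mm-internal code that only means anything inside a kernel tree of this vintage, the name zap_one_pte is illustrative, and the rss-counter bookkeeping of the real function is omitted.

static void zap_one_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;

        if (pte_present(pte)) {
                struct page *page;

                flush_cache_page(vma, addr, pte_pfn(pte));
                pte = ptep_clear_flush(vma, addr, ptep);  /* clear + TLB flush */
                page = vm_normal_page(vma, addr, pte);    /* NULL for pfn maps */
                if (page) {
                        if (pte_dirty(pte))
                                set_page_dirty(page);
                        page_remove_rmap(page);
                        page_cache_release(page);
                }
        } else {
                /* not present: either a nonlinear file pte or a swap entry */
                if (!pte_file(pte))
                        free_swap_and_cache(pte_to_swp_entry(pte));
                pte_clear(mm, addr, ptep);
        }
}
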
pagewalk.c
    9  pte_t *pte;  [local]
   12  pte = pte_offset_map(pmd, addr);
   14  err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
   20  pte++;
   23  pte_unmap(pte);
  115  pte_t *pte;  [local]
  120  pte = huge_pte_offset(walk->mm, addr & hmask);
  121  if (pte && walk->hugetlb_entry)
  122  err = walk->hugetlb_entry(pte, hmask, addr, next, walk);

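These are the two dispatch points of the generic page-table walker: walk->pte_entry is invoked once per PAGE_SIZE step over a mapped pte page, and walk->hugetlb_entry once per huge page. A sketch of how a caller plugs into this, assuming this era's walk_page_range() API; the callback name count_present and the surrounding setup are illustrative.

/* Illustrative pte_entry callback: counts present ptes.
 * Signature matches the walk->pte_entry call on line 14 above. */
static int count_present(pte_t *pte, unsigned long addr,
                         unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;               /* non-zero aborts the walk */
}

/* Caller side: visit every pte in [start, end) of mm.
 * Caller must hold mm->mmap_sem. */
static unsigned long count_range(struct mm_struct *mm,
                                 unsigned long start, unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry      = count_present,
                .mm             = mm,
                .private        = &count,
        };

        walk_page_range(start, end, &walk);     /* error ignored in sketch */
        return count;
}
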
pgtable-generic.c
   88  pte_t pte;  [local]
   89  pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
   91  return pte;

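The snippet skips line 90, which is the whole point of this helper: the TLB flush between clearing the pte and returning its old value. The generic fallback reads, in full (a reconstruction, assuming the elided line is the usual flush_tlb_page(); architectures can override the helper via __HAVE_ARCH_PTEP_CLEAR_FLUSH):

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        pte_t pte;

        /* atomically read and zap the pte ... */
        pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
        /* ... then shoot it out of the TLB before the slot is reused */
        flush_tlb_page(vma, address);
        return pte;
}
#endif
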
mmu_notifier.c
  124  pte_t pte)
  132  mn->ops->change_pte(mn, mm, address, pte);
  123  __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, pte_t pte)  [argument]

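Line 132 sits inside a loop over every notifier registered against the mm: change_pte tells secondary MMUs (KVM being the classic user) that a pte now points at a different page, without forcing a full invalidate. A sketch of that dispatch, assuming this era's 4-argument hlist_for_each_entry_rcu() and the mm->mmu_notifier_mm registration list; the fallback branch of the real function is omitted.

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
                               pte_t pte)
{
        struct mmu_notifier *mn;
        struct hlist_node *n;

        rcu_read_lock();
        hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
                if (mn->ops->change_pte)
                        mn->ops->change_pte(mn, mm, address, pte);
        }
        rcu_read_unlock();
}
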
mincore.c
   69  * file will not get a swp_entry_t in its pte, but rather it is like
  118  pte_t pte = *ptep;  [local]
  122  if (pte_none(pte))
  124  else if (pte_present(pte))
  126  else if (pte_file(pte)) {
  127  pgoff = pte_to_pgoff(pte);
  129  } else { /* pte is a swap entry */
  130  swp_entry_t entry = pte_to_swp_entry(pte);

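The if/else ladder on lines 122-130 is the canonical four-way decode of a pte, and it recurs throughout mm/ (fremap.c and memory.c test the same predicates in the same order). Stated on its own as an illustrative helper; the enum and function name are not kernel names.

/* Every pte is in exactly one of these states; the order of the
 * tests matters, because the file/swap encodings only mean anything
 * once pte_none() and pte_present() have been ruled out. */
enum pte_state { PTE_EMPTY, PTE_PRESENT, PTE_FILE, PTE_SWAP };

static enum pte_state classify_pte(pte_t pte)
{
        if (pte_none(pte))
                return PTE_EMPTY;       /* never populated */
        if (pte_present(pte))
                return PTE_PRESENT;     /* maps a page right now */
        if (pte_file(pte))
                return PTE_FILE;        /* nonlinear offset: pte_to_pgoff() */
        return PTE_SWAP;                /* swap entry: pte_to_swp_entry() */
}
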
mprotect.c
   42  pte_t *pte, oldpte;  [local]
   45  pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
   48  oldpte = *pte;
   52  ptent = ptep_modify_prot_start(mm, addr, pte);
   62  ptep_modify_prot_commit(mm, addr, pte, ptent);
   72  set_pte_at(mm, addr, pte,
   76  } while (pte++, addr += PAGE_SIZE, addr != end);
   78  pte_unmap_unlock(pte - 1, ptl);

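Lines 45-78 give change_pte_range()'s loop shape: take the pte lock once for the whole pmd, then use the modify_prot start/commit pair on each present pte so architectures (notably paravirt ones) can batch the expensive parts of the transient "pte cleared" state. A condensed sketch of just the present-pte path; the non-present branch (line 72) is left out and the function name is illustrative.

static void change_ptes_sketch(struct mm_struct *mm, pmd_t *pmd,
                               unsigned long addr, unsigned long end,
                               pgprot_t newprot)
{
        pte_t *pte;
        spinlock_t *ptl;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
                pte_t oldpte = *pte;

                if (pte_present(oldpte)) {
                        /* take the pte out of service ... */
                        pte_t ptent = ptep_modify_prot_start(mm, addr, pte);
                        /* ... swap in the new protection bits ... */
                        ptent = pte_modify(ptent, newprot);
                        /* ... and publish the result */
                        ptep_modify_prot_commit(mm, addr, pte, ptent);
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
        pte_unmap_unlock(pte - 1, ptl);
}

The pte_unmap_unlock(pte - 1, ...) idiom on line 78 works because the loop exits with pte one slot past the last entry, which still lies on the same pte page.
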
filemap_xip.c
  172  pte_t *pte;  [local]
  192  pte = page_check_address(page, mm, address, &ptl, 1);
  193  if (pte) {
  195  flush_cache_page(vma, address, pte_pfn(*pte));
  196  pteval = ptep_clear_flush_notify(vma, address, pte);
  200  pte_unmap_unlock(pte, ptl);

mremap.c
   78  pte_t *old_pte, *new_pte, pte;  [local]
   94  * pte locks because exclusive mmap_sem prevents deadlock.
  107  pte = ptep_get_and_clear(mm, old_addr, old_pte);
  108  pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
  109  set_pte_at(mm, new_addr, new_pte, pte);

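Lines 107-109 are the whole trick of mremap: no page is copied, only the pte moves. A sketch of the inner loop around them, assuming the caller (move_ptes) has both pte pages mapped and both pte locks held; the function name is illustrative.

static void move_pte_range_sketch(struct mm_struct *mm,
                struct vm_area_struct *new_vma,
                pte_t *old_pte, pte_t *new_pte,
                unsigned long old_addr, unsigned long old_end,
                unsigned long new_addr)
{
        pte_t pte;

        /* Both pte locks are held; per the comment on line 94 this
         * cannot deadlock because mmap_sem is held exclusively. */
        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;
                /* vacate the old slot atomically ... */
                pte = ptep_get_and_clear(mm, old_addr, old_pte);
                /* ... let the arch adjust it for the new address ... */
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                /* ... and install it at the destination */
                set_pte_at(mm, new_addr, new_pte, pte);
        }
}
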
sparse-vmemmap.c
   87  void __meminit vmemmap_verify(pte_t *pte, int node,  [argument]
   90  unsigned long pfn = pte_pfn(*pte);
  100  pte_t *pte = pte_offset_kernel(pmd, addr);  [local]
  101  if (pte_none(*pte)) {
  107  set_pte_at(&init_mm, addr, pte, entry);
  109  return pte;
  156  pte_t *pte;  [local]
  168  pte = vmemmap_pte_populate(pmd, addr, node);
  169  if (!pte)
  171  vmemmap_verify(pte, nod
  [all...]

migrate.c
   86  * Restore a potential migration pte to a working pte entry
   96  pte_t *ptep, pte;  [local]
  130  pte = *ptep;
  131  if (!is_swap_pte(pte))
  134  entry = pte_to_swp_entry(pte);
  141  pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
  143  pte = pte_mkwrite(pte);
  146  pte
  186  pte_t *ptep, pte;  [local]
  [all...]

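During migration the old pte is replaced by a migration entry, a special swap-format pte that makes faulting threads wait; these lines undo that once the data has moved. A sketch of the restore path reconstructed around lines 130-143, with the rmap-walk plumbing, rmap accounting, and update_mmu_cache() of the real remove_migration_pte() stripped away; the function name is illustrative and the caller is assumed to hold the pte lock.

static void restore_migration_pte_sketch(struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long addr,
                pte_t *ptep, struct page *old, struct page *new)
{
        swp_entry_t entry;
        pte_t pte = *ptep;

        /* only a swap-format pte can hold a migration entry */
        if (!is_swap_pte(pte))
                return;
        entry = pte_to_swp_entry(pte);
        if (!is_migration_entry(entry) ||
            migration_entry_to_page(entry) != old)
                return;

        /* build a real pte for the new page ... */
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
        /* ... restoring write permission if the original pte had it */
        if (is_write_migration_entry(entry))
                pte = pte_mkwrite(pte);
        flush_cache_page(vma, addr, pte_pfn(pte));
        set_pte_at(mm, addr, ptep, pte);
}
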
rmap.c
   42  * pte map lock
  613  * the page table lock when the pte is not present (helpful when reclaiming
  616  * On success returns with pte mapped and locked.
  624  pte_t *pte;  [local]
  628  pte = huge_pte_offset(mm, address);
  647  pte = pte_offset_map(pmd, address);
  649  if (!sync && !pte_present(*pte)) {
  650  pte_unmap(pte);
  657  if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
  677  pte_t *pte;  [local]
  729  pte_t *pte;  [local]
  929  pte_t *pte;  [local]
 1218  pte_t *pte;  [local]
 1368  pte_t *pte;  [local]
  [all...]

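Lines 624-657 are the tail of page_check_address(), rmap's central primitive: given a page and an address, find the pte, lock it, and confirm it still maps that page (the comment at line 616 documents the "returns with pte mapped and locked" contract, which filemap_xip.c above relies on). A sketch of that final check, assuming the pgd/pud/pmd walk has already succeeded; the function name is illustrative.

static pte_t *check_one_pte(struct mm_struct *mm, pmd_t *pmd,
                            unsigned long address, struct page *page,
                            spinlock_t **ptlp)
{
        pte_t *pte = pte_offset_map(pmd, address);
        spinlock_t *ptl = pte_lockptr(mm, pmd);

        spin_lock(ptl);
        /* recheck under the lock: the pte can change under a lockless
         * walk, so the pfn comparison is the authoritative test */
        if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
                *ptlp = ptl;
                return pte;     /* per the contract: mapped and locked */
        }
        pte_unmap_unlock(pte, ptl);
        return NULL;
}
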
huge_memory.c
   41  /* default scan 8*512 pte (or vmas) every 30 second */
   53  * default collapse hugepages if there is at least one pte mapped like
  708  pte_t *pte;  [local]
  746  pte = pte_offset_map(pmd, address);
  747  return handle_pte_fault(mm, vma, address, pte, pmd, flags);
  873  /* leave pmd empty until pte is filled */
  879  pte_t *pte, entry;  [local]
  883  pte = pte_offset_map(&_pmd, haddr);
  884  VM_BUG_ON(!pte_none(*pte));
  885  set_pte_at(mm, haddr, pte, entr
 1362  pte_t *pte, entry;  [local]
 1707  release_pte_pages(pte_t *pte, pte_t *_pte)  [argument]
 1716  release_all_pte_pages(pte_t *pte)  [argument]
 1721  __collapse_huge_page_isolate(struct vm_area_struct *vma, unsigned long address, pte_t *pte)  [argument]
 1794  __collapse_huge_page_copy(pte_t *pte, struct page *page, struct vm_area_struct *vma, unsigned long address, spinlock_t *ptl)  [argument]
 1843  pte_t *pte;  [local]
 2018  pte_t *pte, *_pte;  [local]
  [all...]

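The helpers at lines 1707-1794 belong to khugepaged's collapse path: __collapse_huge_page_isolate pins and isolates the 512 small pages behind a pmd, and on failure everything isolated so far must be put back. A reconstruction of the release helpers, under the assumption that this era's huge_memory.c undoes isolation by unlocking the page and returning it to the LRU, as sketched here:

/* Undo isolation for one small page. */
static void release_pte_page(struct page *page)
{
        /* the collapse path only isolates anon pages */
        dec_zone_page_state(page, NR_ISOLATED_ANON);
        unlock_page(page);
        putback_lru_page(page);
}

/* Release every page isolated between pte and _pte (exclusive). */
static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
        while (--_pte >= pte) {
                pte_t pteval = *_pte;

                if (!pte_none(pteval))
                        release_pte_page(pte_page(pteval));
        }
}
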
memory.c
  604  * Ensure all pte setup (eg. pte page lock and page clearing) are
  605  * visible before the pte is made visible to other CPUs by being
  671  * This function is called to print an error when a bad pte
  672  * is found. For example, we might have a PFN-mapped pte in
  678  pte_t pte, struct page *page)
  713  "BUG: Bad page map in process %s pte:%08llx pmd:%08llx\n",
  715  (long long)pte_val(pte), (long long)pmd_val(*pmd));
  754  * vm_normal_page -- This function gets the "struct page" associated with a pte.
  761  * pte bi
  677  print_bad_pte(struct vm_area_struct *vma, unsigned long addr, pte_t pte, struct page *page)  [argument]
  800  vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)  [argument]
  860  pte_t pte = *src_pte;  [local]
 1126  pte_t *pte;  [local]
 1449  pte_t *ptep, pte;  [local]
 1675  pte_t *pte;  [local]
 2008  pte_t *pte;  [local]
 2077  pte_t *pte, entry;  [local]
 2182  pte_t *pte;  [local]
 2319  pte_t *pte;  [local]
 2878  pte_t pte;  [local]
 3405  handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, unsigned int flags)  [argument]
 3467  pte_t *pte;  [local]
 3730  pte_t *ptep, pte;  [local]
  [all...]

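vm_normal_page() (line 800) is the gatekeeper the other files keep calling: it decides whether a pte has a struct page behind it at all, and print_bad_pte() (line 677) produces the "BUG: Bad page map" report for ptes that claim an impossible pfn. A deliberately simplified sketch of that decision; the real function also honours the remap_pfn_range() linear-offset case and pte_special(), both of which this version ignores, and the function name is illustrative.

static struct page *normal_page_sketch(struct vm_area_struct *vma,
                                       unsigned long addr, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);

        /* Simplified: treat any pfn/mixed mapping as "no struct page". */
        if (unlikely(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
                return NULL;

        if (unlikely(!pfn_valid(pfn))) {
                /* produces the "BUG: Bad page map" report on line 713 */
                print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
        return pfn_to_page(pfn);
}
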
mempolicy.c
  470  pte_t *pte;  [local]
  473  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  478  if (!pte_present(*pte))
  480  page = vm_normal_page(vma, addr, *pte);
  498  } while (pte++, addr += PAGE_SIZE, addr != end);

swapfile.c
  852  pte_t *pte;  [local]
  861  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
  862  if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
  872  set_pte_at(vma->vm_mm, addr, pte,
  883  pte_unmap_unlock(pte, ptl);
  893  pte_t *pte;  [local]
  897  * We don't actually need pte lock while scanning for swp_pte: since
  902  * recheck under pte lock. Scanning without pte lock lets it be
  905  pte
  [all...]

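Lines 861-872 belong to unuse_pte(): swapoff scans for matching swap ptes without the lock (per the comment at line 897) and therefore must recheck with pte_same() once the lock is held, before rewriting the pte to point at the swapped-in page. A condensed sketch; the real function also charges the memcg, bumps the rss counter, adds the page to the rmap, and frees the swap entry, and the name unuse_one_pte is illustrative.

static int unuse_one_pte(struct vm_area_struct *vma, pmd_t *pmd,
                         unsigned long addr, swp_entry_t entry,
                         struct page *page)
{
        struct mm_struct *mm = vma->vm_mm;
        spinlock_t *ptl;
        pte_t *pte;
        int ret = 0;

        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        /* the lockless scan may have raced with a fault: recheck */
        if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry))))
                goto out;

        /* swap entry -> real mapping of the just-read page */
        set_pte_at(mm, addr, pte,
                   pte_mkold(mk_pte(page, vma->vm_page_prot)));
        ret = 1;
out:
        pte_unmap_unlock(pte, ptl);
        return ret;
}
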
vmalloc.c
   38  pte_t *pte;  [local]
   40  pte = pte_offset_kernel(pmd, addr);
   42  pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
   44  } while (pte++, addr += PAGE_SIZE, addr != end);
   93  pte_t *pte;  [local]
  100  pte = pte_alloc_kernel(pmd, addr);
  101  if (!pte)
  106  if (WARN_ON(!pte_none(*pte)))
  110  set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
  112  } while (pte
  222  pte_t *ptep, pte;  [local]
 2169  f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)  [argument]
  [all...]

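Lines 100-110 are the mapping half of vmalloc: one page is wired per pte into init_mm's kernel page tables, and a pte that is not none here means the range was not torn down first (hence the WARN_ON). A close sketch of that loop; the function name differs from the original.

static int vmap_ptes_sketch(pmd_t *pmd, unsigned long addr,
                            unsigned long end, pgprot_t prot,
                            struct page **pages, int *nr)
{
        pte_t *pte;

        /* allocates the pte page on demand, then maps it */
        pte = pte_alloc_kernel(pmd, addr);
        if (!pte)
                return -ENOMEM;
        do {
                struct page *page = pages[*nr];

                if (WARN_ON(!pte_none(*pte)))
                        return -EBUSY;          /* range not vacated */
                if (WARN_ON(!page))
                        return -ENOMEM;
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*nr)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        return 0;
}
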
hugetlb.c
 2185  static int is_hugetlb_entry_migration(pte_t pte)  [argument]
 2189  if (huge_pte_none(pte) || pte_present(pte))
 2191  swp = pte_to_swp_entry(pte);
 2198  static int is_hugetlb_entry_hwpoisoned(pte_t pte)  [argument]
 2202  if (huge_pte_none(pte) || pte_present(pte))
 2204  swp = pte_to_swp_entry(pte);
 2217  pte_t pte;  [local]
 2250  pte
 2355  hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t pte, struct page *pagecache_page)  [argument]
 2762  pte_t *pte;  [local]
 2839  pte_t pte;  [local]
  [all...]

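The two predicates at lines 2185 and 2198 share one rule: a huge pte that is neither empty nor present must hold a swap-format entry, and only then is the entry's sub-type worth inspecting. Filling in the elided body of the first one as a reconstruction (the hwpoisoned variant is identical except that it tests is_hwpoison_entry() instead of is_migration_entry()):

static int is_hugetlb_entry_migration(pte_t pte)
{
        swp_entry_t swp;

        /* empty or mapped huge ptes carry no swap-format payload */
        if (huge_pte_none(pte) || pte_present(pte))
                return 0;
        swp = pte_to_swp_entry(pte);
        return non_swap_entry(swp) && is_migration_entry(swp);
}
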
memcontrol.c
 2744  * the pte, and even removed page from swap cache: in those cases
 5043  * is_target_pte_for_mc - check a pte whether it is valid for move charge
 5044  * @vma: the vma the pte to be checked belongs
 5045  * @addr: the address corresponding to the pte to be checked
 5046  * @ptent: the pte to be checked
 5050  * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
 5051  * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 5054  * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 5058  * Called with pte lock held.
 5194  pte_t *pte;  [local]
 5356  pte_t *pte;  [local]
  [all...]

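The kernel-doc above fixes is_target_pte_for_mc()'s three-way contract: for each pte scanned during a memcg move_charge, the checker reports nothing to do, a page to move, or a swap entry to move. Restated as the result type a caller would switch on; the MC_TARGET_* names come from the comment itself, while packaging them as an enum here is illustrative.

/* Verdicts for one scanned pte during memcg move_charge,
 * matching the 0/1/2 values documented above. */
enum mc_target_type {
        MC_TARGET_NONE,         /* pte is not a move-charge target */
        MC_TARGET_PAGE,         /* the pte's page should be moved */
        MC_TARGET_SWAP,         /* the pte's swap entry should be moved */
};
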