Searched defs:pfn (Results 26 - 50 of 140) sorted by relevance

/arch/microblaze/mm/
consistent.c
    187  unsigned long pfn;  [local]
    194  pfn = pte_pfn(*ptep);
    196  if (pfn_valid(pfn)) {
    197  page = pfn_to_page(pfn);
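
Several of the hits in this listing (microblaze, mips, powerpc, score, xtensa) repeat the same three-step idiom: read the frame number out of a PTE, check that the kernel actually manages that frame, and only then translate it to a struct page. A minimal, hedged sketch of that flow, assuming kernel context; this is not the exact consistent.c body:

#include <linux/mm.h>        /* pfn_valid(), pfn_to_page() */
#include <asm/pgtable.h>     /* pte_pfn() */

/* Return the struct page behind a PTE, or NULL if the pfn does not
 * refer to RAM the kernel manages (e.g. an I/O mapping or a hole). */
static struct page *page_behind_pte(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (!pfn_valid(pfn))
		return NULL;

	return pfn_to_page(pfn);
}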
/arch/mips/mm/
ioremap.c
    23  unsigned long pfn;  [local]
    32  pfn = phys_addr >> PAGE_SHIFT;
    38  set_pte(pte, pfn_pte(pfn, pgprot));
    40  pfn++;
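
The ioremap hit shows the standard conversion from a physical address to a pfn: shifting right by PAGE_SHIFT drops the offset-within-page bits, and each set_pte() then maps exactly one frame. A hedged sketch of that loop, assuming kernel context; the helper name is illustrative and set_pte()'s exact form is per-architecture (the two-argument form matches the mips snippet above):

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Map 'count' consecutive frames starting at phys_addr into the ptes
 * at 'pte'.  pfn and pte advance together, one frame per entry. */
static void map_phys_range(pte_t *pte, phys_addr_t phys_addr,
			   unsigned long count, pgprot_t pgprot)
{
	unsigned long pfn = phys_addr >> PAGE_SHIFT;

	while (count--) {
		set_pte(pte, pfn_pte(pfn, pgprot));
		pte++;
		pfn++;
	}
}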
c-octeon.c
    162  * @pfn:
    165  unsigned long page, unsigned long pfn)
    164  octeon_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)  [argument]
cache.c
    31  unsigned long pfn);
    125  unsigned long pfn, addr;  [local]
    128  pfn = pte_pfn(pte);
    129  if (unlikely(!pfn_valid(pfn)))
    131  page = pfn_to_page(pfn);
/arch/mn10300/mm/
pgtable.c
    31  * and protection flags for that frame. pfn is for the base of the page,
    35  void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)  [argument]
    45  if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
    46  printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
    56  set_pmd(pmd, pfn_pmd(pfn, flags));
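
The mn10300 check is pure alignment arithmetic: one pmd entry spans PTRS_PER_PTE frames, so the base pfn must be a multiple of PTRS_PER_PTE or the mapping would not start on a pmd boundary. A small user-space illustration; the 1024-entry value is an assumption for the example, not taken from mn10300 headers:

#include <stdio.h>

#define PTRS_PER_PTE 1024UL   /* assumed: entries per page table */

int main(void)
{
	unsigned long pfns[] = { 0x400, 0x401, 0x800 };

	for (int i = 0; i < 3; i++)
		printf("pfn 0x%lx: %s\n", pfns[i],
		       (pfns[i] & (PTRS_PER_PTE - 1)) ? "misaligned" : "pmd-aligned");
	return 0;
}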
/arch/powerpc/kernel/
crash_dump.c
    96  * @pfn: page frame number to be copied
    100  * @offset: offset in bytes into the page (based on pfn) to begin the copy
    107  ssize_t copy_oldmem_page(unsigned long pfn, char *buf,  [argument]
    117  if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
    118  vaddr = __va(pfn << PAGE_SHIFT);
    121  vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
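
The powerpc hit picks an access method from the pfn alone: frames inside the kernel's linear mapping are reachable through __va(), anything outside it needs a temporary __ioremap() window. A hedged sketch of just that selection, assuming kernel context; the wrapper name is illustrative, and min_low_pfn/max_pfn are the lowmem bounds used in the snippet above:

#include <linux/mm.h>
#include <linux/io.h>

/* Return a kernel virtual address for one old-memory frame. */
static void *old_pfn_to_vaddr(unsigned long pfn)
{
	if (min_low_pfn < pfn && pfn < max_pfn)
		return __va(pfn << PAGE_SHIFT);             /* already mapped */

	/* powerpc three-argument __ioremap(), as in the hit above */
	return (void *)__ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
}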
/arch/powerpc/mm/
pgtable.c
    56  unsigned long pfn = pte_pfn(pte);  [local]
    59  if (unlikely(!pfn_valid(pfn)))
    61  page = pfn_to_page(pfn);
/arch/s390/kernel/
suspend.c
    9  #include <linux/pfn.h>
    98  void page_key_read(unsigned long *pfn)  [argument]
    102  addr = (unsigned long) page_address(pfn_to_page(*pfn));
    103  *(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
    110  void page_key_memorize(unsigned long *pfn)  [argument]
    112  page_key_wp->data[page_key_wx] = *(unsigned char *) pfn;
    113  *(unsigned char *) pfn = 0;
    136  int pfn_is_nosave(unsigned long pfn)  [argument]
    142  if (pfn <= LC_PAGES)
    144  if (pfn >
[all...]
/arch/score/mm/
init.c
    114  unsigned long pfn;  [local]
    116  for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
    117  struct page *page = pfn_to_page(pfn);
    118  void *addr = phys_to_virt(PFN_PHYS(pfn));
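
The score loop relies on the rounding helpers whose definitions mirror <linux/pfn.h> (the header included by the s390 hit above): PFN_UP rounds a byte address up to the next whole frame, PFN_DOWN rounds it down, and PFN_PHYS converts a pfn back to a physical address, so only frames lying entirely inside [begin, end) are walked. A user-space illustration of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((unsigned long long)(x) << PAGE_SHIFT)

int main(void)
{
	unsigned long begin = 0x1234, end = 0x5000;

	printf("first full pfn:     %lu\n", PFN_UP(begin));   /* 2 */
	printf("first excluded pfn: %lu\n", PFN_DOWN(end));   /* 5 */
	printf("PFN_PHYS(2) = %#llx\n", PFN_PHYS(2));         /* 0x2000 */
	return 0;
}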
cache.c
    81  unsigned long pfn, addr;  [local]
    84  pfn = pte_pfn(pte);
    85  if (unlikely(!pfn_valid(pfn)))
    87  page = pfn_to_page(pfn);
    211  unsigned long addr, unsigned long pfn)
    214  unsigned long kaddr = 0xa0000000 | (pfn << PAGE_SHIFT);
    210  flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)  [argument]
/arch/sh/mm/
cache-sh7705.c
    167  unsigned long pfn = data->addr2;  [local]
    169  __flush_dcache_page(pfn << PAGE_SHIFT);
mmap.c
    251  int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)  [argument]
/arch/unicore32/kernel/
hibernate.c
    74  unsigned long pfn;  [local]
    82  pfn = 0;
    89  if (pfn >= max_low_pfn)
    95  if (pfn >= max_low_pfn)
    106  for (; pte < max_pte; pte++, pfn++) {
    107  if (pfn >= max_low_pfn)
    110  set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
    141  * pfn_is_nosave - check if given pfn is in the 'nosave' section
    144  int pfn_is_nosave(unsigned long pfn)  [argument]
    149  return (pfn >
[all...]
/arch/unicore32/mm/
flush.c
    31  unsigned long pfn)
    30  flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)  [argument]
/arch/x86/mm/
highmem_32.c
    64  void *kmap_atomic_pfn(unsigned long pfn)  [argument]
    66  return kmap_atomic_prot_pfn(pfn, kmap_prot);
/arch/x86/power/
hibernate_32.c
    85  unsigned long pfn;  [local]
    93  pfn = 0;
    100  if (pfn >= max_low_pfn)
    104  if (pfn >= max_low_pfn)
    112  set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
    113  pfn += PTRS_PER_PTE;
    122  for (; pte < max_pte; pte++, pfn++) {
    123  if (pfn >= max_low_pfn)
    126  set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
    168  * pfn_is_nosave - check if given pfn i
    171  pfn_is_nosave(unsigned long pfn)  [argument]
[all...]
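
The two strides in the x86 hit are the interesting detail: when a whole pmd entry is used as a large page, pfn jumps by PTRS_PER_PTE frames at once, while the 4 KiB fallback fills individual ptes and advances one frame each. A hedged sketch of that shape, assuming kernel context; the helper name and the use_large_pages flag are illustrative, and the PAGE_KERNEL_*_EXEC names are the x86 ones seen above, not the hibernate_32.c body itself:

#include <linux/mm.h>
#include <asm/pgtable.h>

static void map_pmd_worth(pmd_t *pmd, pte_t *pte, pte_t *max_pte,
			  unsigned long *pfn, bool use_large_pages)
{
	if (use_large_pages) {
		/* one large entry covers PTRS_PER_PTE small frames */
		set_pmd(pmd, pfn_pmd(*pfn, PAGE_KERNEL_LARGE_EXEC));
		*pfn += PTRS_PER_PTE;
	} else {
		for (; pte < max_pte; pte++, (*pfn)++)
			set_pte(pte, pfn_pte(*pfn, PAGE_KERNEL_EXEC));
	}
}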
/arch/xtensa/mm/
cache.c
    136  unsigned long pfn)
    140  unsigned long phys = page_to_phys(pfn_to_page(pfn));
    152  unsigned long pfn = pte_pfn(*ptep);  [local]
    155  if (!pfn_valid(pfn))
    158  page = pfn_to_page(pfn);
    135  flush_cache_page(struct vm_area_struct* vma, unsigned long address, unsigned long pfn)  [argument]
init.c
    102  unsigned long pfn;  [local]
    110  pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
    111  if (pfn < min_low_pfn)
    112  min_low_pfn = pfn;
    113  pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
    114  if (pfn > max_pfn)
    115  max_pfn = pfn;
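
The xtensa loop folds every memory bank into one global [min_low_pfn, max_pfn] range. The same reduction in plain user-space C, with a made-up bank table standing in for sysmem and the PAGE_ALIGN rounding of partial pages omitted for brevity:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4 KiB pages */

struct bank { unsigned long start, end; };   /* byte addresses */

int main(void)
{
	struct bank banks[] = { { 0x00000000, 0x02000000 },
				{ 0x08000000, 0x08800000 } };
	unsigned long min_low_pfn = ~0UL, max_pfn = 0, pfn;

	for (int i = 0; i < 2; i++) {
		pfn = banks[i].start >> PAGE_SHIFT;
		if (pfn < min_low_pfn)
			min_low_pfn = pfn;
		pfn = (banks[i].end - 1) >> PAGE_SHIFT;
		if (pfn > max_pfn)
			max_pfn = pfn;
	}
	printf("pfns %#lx..%#lx\n", min_low_pfn, max_pfn);
	return 0;
}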
/arch/arm/mm/
fault-armv.c
    41  unsigned long pfn, pte_t *ptep)
    56  flush_cache_page(vma, address, pfn);
    57  outer_flush_range((pfn << PAGE_SHIFT),
    58  (pfn << PAGE_SHIFT) + PAGE_SIZE);
    93  unsigned long pfn)
    123  ret = do_adjust_pte(vma, address, pfn, pte);
    133  unsigned long addr, pte_t *ptep, unsigned long pfn)
    161  aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
    165  do_adjust_pte(vma, addr, pfn, ptep);
    184  unsigned long pfn  [local]
    40  do_adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn, pte_t *ptep)  [argument]
    92  adjust_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)  [argument]
    132  make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, unsigned long pfn)  [argument]
[all...]
flush.c
    25  static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)  [argument]
    30  set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));
    39  static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)  [argument]
    45  set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
    85  void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)  [argument]
    88  vivt_flush_cache_page(vma, user_addr, pfn);
    93  flush_pfn_alias(pfn, user_addr);
    102  #define flush_pfn_alias(pfn,vaddr) do { } while (0)
    103  #define flush_icache_alias(pfn,vaddr,len) do { } while (0)
    230  unsigned long pfn;  [local]
    314  unsigned long pfn;  [local]
[all...]
ioremap.c
    121  remap_area_sections(unsigned long virt, unsigned long pfn,  [argument]
    139  pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
    140  pfn += SZ_1M >> PAGE_SHIFT;
    141  pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
    142  pfn += SZ_1M >> PAGE_SHIFT;
    153  remap_area_supersections(unsigned long virt, unsigned long pfn,  [argument]
    173  super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
    175  super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
    186  pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
    193  void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,  [argument]
    280  unsigned long pfn = __phys_to_pfn(phys_addr);  [local]
    303  __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype)  [argument]
[all...]
mmap.c
    297  int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)  [argument]
    299  return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
    313  int devmem_is_allowed(unsigned long pfn)  [argument]
    315  if (iomem_is_exclusive(pfn << PAGE_SHIFT))
    317  if (!page_is_ram(pfn))
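
The arm valid_mmap_phys_addr_range() test is worth unpacking: with 4 KiB pages, 0x00100000 frames cover exactly 0x00100000 << 12 = 0x100000000 bytes, i.e. 4 GiB, so the check rejects any mapping whose last frame would fall beyond the 32-bit physical range. A user-space rendering of the same arithmetic; the page size is an assumption for the example:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12                 /* assumed 4 KiB pages */
#define MAX_PFN    0x00100000UL       /* limit used in the arm hit */

static int range_ok(unsigned long pfn, size_t size)
{
	return !(pfn + (size >> PAGE_SHIFT) > MAX_PFN);
}

int main(void)
{
	printf("boundary at byte %#llx\n",
	       (unsigned long long)MAX_PFN << PAGE_SHIFT);        /* 4 GiB */
	printf("pfn 0xfffff, one page:  %s\n",
	       range_ok(0xfffff, 1UL << PAGE_SHIFT) ? "ok" : "rejected");
	printf("pfn 0xfffff, two pages: %s\n",
	       range_ok(0xfffff, 2UL << PAGE_SHIFT) ? "ok" : "rejected");
	return 0;
}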
nommu.c
    69  void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,  [argument]
    72  if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
    74  return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
    78  void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,  [argument]
    81  return __arm_ioremap_pfn(pfn, offset, size, mtype);
/arch/openrisc/mm/
init.c
    198  int reservedpages, pfn;  [local]
    204  for (pfn = 0; pfn < max_low_pfn; pfn++) {
    208  if (PageReserved(mem_map + pfn))
/arch/powerpc/include/asm/
kvm_e500.h
    29  pfn_t pfn;  [member in struct tlbe_ref]

Completed in 1835 milliseconds
