Searched defs:pfn (Results 126 - 140 of 140) sorted by relevance

123456

/arch/parisc/include/asm/
H A Dpgtable.h362 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) argument
365 pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
499 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
500 remap_pfn_range(vma, vaddr, pfn, size, prot)
/arch/powerpc/mm/
H A Dnuma.c22 #include <linux/pfn.h>
130 * get_node_active_region - Return active region containing pfn
132 * @pfn: The page to return the region for
133 * @node_ar: Returned set to the active region containing @pfn
135 static void __init get_node_active_region(unsigned long pfn, argument
142 if (pfn >= start_pfn && pfn < end_pfn) {
/arch/powerpc/platforms/cell/spufs/
H A Dfile.c241 unsigned long pfn, offset; local
272 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
275 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
277 vm_insert_pfn(vma, address, pfn);
/arch/s390/include/asm/
H A Dpgtable.h61 static inline int is_zero_pfn(unsigned long pfn) argument
64 unsigned long offset_from_zero_pfn = pfn - zero_pfn;
1152 #define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
/arch/sparc/include/asm/
H A Dpgtable_64.h231 static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) argument
233 unsigned long paddr = pfn << PAGE_SHIFT;
760 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
763 #define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
764 #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
765 #define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
771 unsigned long from, unsigned long pfn,
770 io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot) argument
[all...]
/arch/tile/kernel/
H A Dsetup.c585 unsigned long pfn = kaddr_to_pfn(kva); local
586 BUG_ON(goal && PFN_PHYS(pfn) != goal);
587 return pfn;
1324 unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid]; local
1331 percpu_pfn[cpu] = pfn;
1332 return pfn_to_kaddr(pfn);
1375 unsigned long delta, pfn, lowmem_va; local
1392 pfn = percpu_pfn[cpu];
1395 pg = pfn_to_page(pfn);
1396 for (i = 0; i < size; i += PAGE_SIZE, ++pfn,
[all...]
/arch/x86/include/asm/
H A Dparavirt.h415 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) argument
417 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
419 static inline void paravirt_release_pte(unsigned long pfn) argument
421 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
424 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) argument
426 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
429 static inline void paravirt_release_pmd(unsigned long pfn) argument
431 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
434 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) argument
436 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
438 paravirt_release_pud(unsigned long pfn) argument
[all...]
/arch/x86/kernel/cpu/mcheck/
H A Dmce.c477 static int mce_ring_get(unsigned long *pfn) argument
482 *pfn = 0;
487 *pfn = r->ring[r->start];
496 static int mce_ring_add(unsigned long pfn) argument
504 r->ring[r->end] = pfn;
1160 int memory_failure(unsigned long pfn, int vector, int flags) argument
1165 "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", pfn);
1181 unsigned long pfn; local
1186 pfn = mi->paddr >> PAGE_SHIFT;
1197 if (memory_failure(pfn, MCE_VECTO
1212 unsigned long pfn; local
[all...]
/arch/x86/xen/
H A Denlighten.c398 unsigned long pfn; local
404 pfn = pte_pfn(*ptep);
405 page = pfn_to_page(pfn);
407 pte = pfn_pte(pfn, prot);
413 void *av = __va(PFN_PHYS(pfn));
476 unsigned long pfn, mfn; local
489 pfn = pte_pfn(*ptep);
490 mfn = pfn_to_mfn(pfn);
491 virt = __va(PFN_PHYS(pfn));
524 unsigned long pfn, mf local
1432 u64 pfn; local
[all...]
H A Dmmu.c6 * domain's pfn and the overall machine mfns.
14 * notion of a "physical" pfn - which is just a domain-local linear
20 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
22 * the mfn back into a pfn.
356 unsigned long pfn = mfn_to_pfn(mfn); local
359 if (unlikely(pfn == ~0))
362 val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
371 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; local
376 mfn = get_phys_to_machine(pfn);
378 mfn = pfn;
409 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; local
799 xen_do_pin(unsigned level, unsigned long pfn) argument
823 unsigned long pfn = page_to_pfn(page); local
965 unsigned long pfn = page_to_pfn(page); local
1402 unsigned long pfn = pte_pfn(pte); local
1428 pin_pagetable_pfn(unsigned cmd, unsigned long pfn) argument
1439 xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) argument
1449 xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) argument
1459 xen_release_pte_init(unsigned long pfn) argument
1465 xen_release_pmd_init(unsigned long pfn) argument
1470 __pin_pagetable_pfn(unsigned cmd, unsigned long pfn) argument
1483 __set_pfn_prot(unsigned long pfn, pgprot_t prot) argument
1495 xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level) argument
1524 xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) argument
1529 xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) argument
1535 xen_release_ptpage(unsigned long pfn, unsigned level) argument
1557 xen_release_pte(unsigned long pfn) argument
1562 xen_release_pmd(unsigned long pfn) argument
1568 xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) argument
1573 xen_release_pud(unsigned long pfn) argument
1625 unsigned long pfn = __pa(addr) >> PAGE_SHIFT; local
1636 unsigned long pfn; local
1793 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); local
[all...]
/arch/powerpc/include/asm/
H A Dkvm_host.h164 unsigned long pfn; member in struct:kvmppc_pginfo
/arch/sparc/mm/
H A Dinit_64.c262 static void flush_dcache(unsigned long pfn) argument
266 page = pfn_to_page(pfn);
300 unsigned long pfn = pte_pfn(pte); local
302 if (pfn_valid(pfn))
303 flush_dcache(pfn);
/arch/ia64/kernel/
H A Dperfmon.c2247 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT; local
2250 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
/arch/x86/kvm/
H A Dmmu.c220 static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access) argument
222 if (unlikely(is_noslot_pfn(pfn))) {
479 * Update the state bits, it means the mapped pfn is not changed.
517 pfn_t pfn; local
528 pfn = spte_to_pfn(old_spte);
530 kvm_set_pfn_accessed(pfn);
532 kvm_set_pfn_dirty(pfn);
2194 gfn_t gfn, pfn_t pfn, bool speculative,
2200 if (set_mmio_spte(sptep, gfn, pfn, pte_access))
2217 kvm_is_mmio_pfn(pfn));
2191 set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, int user_fault, int write_fault, int level, gfn_t gfn, pfn_t pfn, bool speculative, bool can_unsync, bool host_writable) argument
2288 mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pt_access, unsigned pte_access, int user_fault, int write_fault, int *emulate, int level, gfn_t gfn, pfn_t pfn, bool speculative, bool host_writable) argument
2445 __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, int map_writable, int level, gfn_t gfn, pfn_t pfn, bool prefault) argument
2503 kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn) argument
2517 pfn_t pfn = *pfnp; local
2556 mmu_invalid_pfn(pfn_t pfn) argument
2561 handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, pfn_t pfn, unsigned access, int *ret_val) argument
2589 pfn_t pfn; local
3020 try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, gva_t gva, pfn_t *pfn, bool write, bool *writable) argument
3050 pfn_t pfn; local
[all...]
/arch/x86/include/asm/uv/
H A Duv_mmrs.h709 unsigned long pfn:41; /* RO */ member in struct:uvh_gr0_tlb_mmr_read_data_hi_u::uvh_gr0_tlb_mmr_read_data_hi_s
958 unsigned long pfn:41; /* RO */ member in struct:uvh_gr1_tlb_mmr_read_data_hi_u::uvh_gr1_tlb_mmr_read_data_hi_s

Completed in 426 milliseconds

123456