/arch/ia64/kvm/
misc.h
    34   static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,   [argument]
    41   pmt_base[gfn] = pte;
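The ia64 hit above is the simplest gfn-indexed structure in this listing: a flat page-map table with one pte slot per guest frame, so kvm_set_pmt_entry() is a single array store. A minimal userspace sketch of that shape follows; the table size, pte value, and names are illustrative assumptions, not the real ia64 structures.

    /* Minimal model of a flat, gfn-indexed page-map table: one pte slot
     * per guest frame, so installing a translation is one array store.
     * Types, table size, and the pte value are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    #define PMT_ENTRIES 1024                /* hypothetical guest: 1024 frames */

    static uint64_t pmt_base[PMT_ENTRIES];

    static void set_pmt_entry(gfn_t gfn, uint64_t pte)
    {
        if (gfn < PMT_ENTRIES)              /* guard: gfn must be in-range */
            pmt_base[gfn] = pte;
    }

    int main(void)
    {
        set_pmt_entry(5, 0xabc000 | 0x1);   /* frame 5 -> host page, present bit */
        printf("pmt[5] = %#lx\n", (unsigned long)pmt_base[5]);
        return 0;
    }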
/arch/x86/kvm/
x86.h
    75   vcpu_cache_mmio_info(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, unsigned access)   [argument]
    76   gva_t gva, gfn_t gfn, unsigned access)
    80   vcpu->arch.mmio_gfn = gfn;
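vcpu_cache_mmio_info() records the gva and gfn of the last MMIO access so a repeated access to the same guest page can skip the full page-table walk. A hedged sketch of that caching pattern; the struct fields, the walk stub, and the hit test are illustrative, not the kernel's.

    /* Sketch of an MMIO translation cache: remember the last gva -> gfn
     * pair so a repeat access to the same page skips the expensive walk. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct mmio_cache {
        uint64_t gva;                   /* guest virtual page of last MMIO hit */
        uint64_t gfn;                   /* guest frame it translated to */
        bool valid;
    };

    static uint64_t slow_walk(uint64_t gva)  /* stand-in for a real page walk */
    {
        return (gva >> PAGE_SHIFT) + 0x100;  /* fake, fixed-offset mapping */
    }

    static uint64_t translate(struct mmio_cache *c, uint64_t gva)
    {
        if (c->valid && (c->gva >> PAGE_SHIFT) == (gva >> PAGE_SHIFT))
            return c->gfn;              /* fast path: same page as last time */
        c->gva = gva;                   /* slow path: walk, then cache */
        c->gfn = slow_walk(gva);
        c->valid = true;
        return c->gfn;
    }

    int main(void)
    {
        struct mmio_cache c = { 0 };
        printf("%#lx\n", (unsigned long)translate(&c, 0xfee00030)); /* miss */
        printf("%#lx\n", (unsigned long)translate(&c, 0xfee00040)); /* hit */
        return 0;
    }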
mmu_audit.c
    99   gfn_t gfn;   [local]
    116  gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
    117  pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
    136  gfn_t gfn;   [local]
    139  gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
    141  if (!gfn_to_memslot(kvm, gfn)) {
    144  audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
    145  audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
    146  (long int)(sptep - rev_sp->spt), rev_sp->gfn);
    [all...]
paging_tmpl.h
    72   gfn_t gfn;   [member in struct:guest_walker]
    255  gfn_t gfn;   [local]
    258  gfn = gpte_to_gfn_lvl(pte, lvl);
    259  gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
    264  gfn += pse36_gfn_delta(pte);
    268  real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
    273  walker->gfn = real_gpa >> PAGE_SHIFT;
    433  gfn_t gfn;   [local]
    449  gfn = gpte_to_gfn(gpte);
    450  pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
    784  gfn_t gfn;   [local]
    [all...]
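Lines 258-259 are the heart of the guest walk: the gpte supplies the large page's base frame, and the low bits of the faulting address supply the 4K-frame offset inside that large page. A self-contained worked example for a 2M (level-2) mapping, with simplified stand-ins for gpte_to_gfn_lvl() and PT_LVL_OFFSET_MASK(); constants assume x86-64 with 4K base pages and 512-entry levels.

    /* Worked example of the walker's gfn computation: the gpte gives the
     * large page's base frame, the address supplies the 4K offset in it. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PT64_LEVEL_BITS 9

    /* Address bits that index below the given level (PT_LVL_OFFSET_MASK). */
    static uint64_t lvl_offset_mask(int level)
    {
        return (1ULL << (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)) - 1;
    }

    /* Frame stored in the gpte, truncated to the level's alignment. */
    static uint64_t gpte_to_gfn_lvl(uint64_t gpte, int level)
    {
        uint64_t gfn = (gpte & ~0xfffULL) >> PAGE_SHIFT;
        return gfn & ~((1ULL << ((level - 1) * PT64_LEVEL_BITS)) - 1);
    }

    int main(void)
    {
        uint64_t gpte = 0x40200083;     /* 2M leaf: base 0x40200000, flags 0x83 */
        uint64_t addr = 0x40234567;     /* faulting guest address */
        int lvl = 2;

        uint64_t gfn = gpte_to_gfn_lvl(gpte, lvl);
        gfn += (addr & lvl_offset_mask(lvl)) >> PAGE_SHIFT;

        printf("gfn = %#lx\n", (unsigned long)gfn);  /* 0x40234 == addr >> 12 */
        return 0;
    }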
svm.c
    3994 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)   [argument]
mmu.c
    197  static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)   [argument]
    201  trace_mark_mmio_spte(sptep, gfn, access);
    202  mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
    220  static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access)   [argument]
    223  mark_mmio_spte(sptep, gfn, access);
    670  return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
    673  static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)   [argument]
    676  BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
    678  sp->gfns[index] = gfn;
    682  * Return the pointer to the large page information for a given gfn,
    685  lpage_info_slot(gfn_t gfn, struct kvm_memory_slot *slot, int level)   [argument]
    695  account_shadowed(struct kvm *kvm, gfn_t gfn)   [argument]
    710  unaccount_shadowed(struct kvm *kvm, gfn_t gfn)   [argument]
    726  has_wrprotected_page(struct kvm *kvm, gfn_t gfn, int level)   [argument]
    742  host_mapping_level(struct kvm *kvm, gfn_t gfn)   [argument]
    761  gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)   [argument]
    948  __gfn_to_rmap(gfn_t gfn, int level, struct kvm_memory_slot *slot)   [argument]
    963  gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)   [argument]
    979  rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)   [argument]
    998  gfn_t gfn;   [local]
    1013 kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn, struct kvm_memory_slot *slot)   [argument]
    1054 rmap_write_protect(struct kvm *kvm, u64 gfn)   [argument]
    1133 gfn_t gfn = memslot->base_gfn + gfn_offset;   [local]
    1224 rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)   [argument]
    1300 kvm_page_table_hashfn(gfn_t gfn)   [argument]
    1535 kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
    1669 kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gaddr, unsigned level, int direct, unsigned access, u64 *parent_pte)   [argument]
    2010 kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)   [argument]
    2033 page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)   [argument]
    2134 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
    2155 kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
    2168 mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)   [argument]
    2191 set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, int user_fault, int write_fault, int level, gfn_t gfn, pfn_t pfn, bool speculative, bool can_unsync, bool host_writable)   [argument]
    2288 mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pt_access, unsigned pte_access, int user_fault, int write_fault, int *emulate, int level, gfn_t gfn, pfn_t pfn, bool speculative, bool host_writable)   [argument]
    2359 pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log)   [argument]
    2383 gfn_t gfn;   [local]
    2445 __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, int map_writable, int level, gfn_t gfn, pfn_t pfn, bool prefault)   [argument]
    2503 kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)   [argument]
    2518 gfn_t gfn = *gfnp;   [local]
    2561 handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, pfn_t pfn, unsigned access, int *ret_val)   [argument]
    2583 nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn, bool prefault)   [argument]
    2939 gfn_t gfn = get_mmio_spte_gfn(spte);   [local]
    2978 gfn_t gfn;   [local]
    2999 kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)   [argument]
    3020 try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, gva_t gva, pfn_t *pfn, bool write, bool *writable)   [argument]
    3054 gfn_t gfn = gpa >> PAGE_SHIFT;   [local]
    3162 sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access, int *nr_present)   [argument]
    3625 gfn_t gfn = gpa >> PAGE_SHIFT;   [local]
    [all...]
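mark_mmio_spte() (lines 197-202) never maps an MMIO gfn: it parks the gfn and the access rights inside a not-present spte tagged with shadow_mmio_mask, so the next fault on it can recover the gfn (line 2939, get_mmio_spte_gfn()) without any walk. A hedged encode/decode sketch; the mask and access-bit values below are assumptions chosen for illustration.

    /* Sketch of MMIO spte packing: a magic mask marks the (not-present)
     * entry as MMIO, low bits carry access rights, and the gfn rides in
     * the frame field. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define SHADOW_MMIO_MASK (3ULL << 52)   /* illustrative "this is MMIO" tag */
    #define ACCESS_MASK 0x7ULL              /* illustrative r/w/x bits */

    static uint64_t mark_mmio_spte(uint64_t gfn, unsigned access)
    {
        return SHADOW_MMIO_MASK | (access & ACCESS_MASK) | (gfn << PAGE_SHIFT);
    }

    static int is_mmio_spte(uint64_t spte)
    {
        return (spte & SHADOW_MMIO_MASK) == SHADOW_MMIO_MASK;
    }

    static uint64_t get_mmio_spte_gfn(uint64_t spte)
    {
        return (spte & ~SHADOW_MMIO_MASK & ~ACCESS_MASK) >> PAGE_SHIFT;
    }

    int main(void)
    {
        uint64_t spte = mark_mmio_spte(0xfee00, 0x3);   /* e.g. APIC frame */
        printf("mmio=%d gfn=%#lx\n", is_mmio_spte(spte),
               (unsigned long)get_mmio_spte_gfn(spte));
        return 0;
    }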
x86.c
    421  int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,   [argument]
    424  return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
    471  gfn_t gfn;   [local]
    481  gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
    483  r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
    1417 u64 gfn;   [local]
    1428 gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
    1429 addr = gfn_to_hva(kvm, gfn);
    3057 * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
    3079 unsigned long gfn   [local]
    6473 kvm_async_pf_hash_fn(gfn_t gfn)   [argument]
    6483 kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
    6493 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
    6506 kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
    6511 kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
    [all...]
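The async-pf helpers (lines 6473-6511) track gfns with faults in flight in a small per-vcpu hash table probed linearly. A simplified userspace sketch of that structure; the table size and hash constant are assumptions, and the kernel's delete also back-shifts later entries, which this sketch omits.

    /* Sketch of a small open-addressed gfn set in the spirit of the
     * async-pf helpers: hash the gfn into a fixed array, probe linearly. */
    #include <stdint.h>
    #include <stdio.h>

    #define NSLOTS 64                       /* power of two */

    static uint64_t slots[NSLOTS];          /* 0 means empty here */

    static unsigned hash_gfn(uint64_t gfn)
    {
        return (unsigned)((gfn * 0x9e3779b97f4a7c15ULL) >> 58); /* top 6 bits */
    }

    static void add_gfn(uint64_t gfn)
    {
        unsigned i = hash_gfn(gfn);
        while (slots[i])                    /* linear probe to a free slot */
            i = (i + 1) & (NSLOTS - 1);
        slots[i] = gfn;
    }

    static int find_gfn(uint64_t gfn)
    {
        unsigned i = hash_gfn(gfn);
        for (unsigned n = 0; n < NSLOTS && slots[i]; n++, i = (i + 1) & (NSLOTS - 1))
            if (slots[i] == gfn)
                return 1;
        return 0;
    }

    int main(void)
    {
        add_gfn(0x1234);
        printf("0x1234: %d, 0x9999: %d\n", find_gfn(0x1234), find_gfn(0x9999));
        return 0;
    }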
vmx.c
    6376 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)   [argument]
    6395 ret = kvm_get_guest_memory_type(vcpu, gfn) <<
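vmx_get_mt_mask() composes the EPT memory-type bits for a gfn: the effective type is shifted into the pte's memtype field, and MMIO frames are forced uncacheable. A sketch under the EPT layout as I understand it (memtype in pte bits 5:3, UC=0, WB=6); treat these constants, and the per-gfn policy stub, as assumptions.

    /* Sketch of composing an EPT-style memory-type mask for a gfn. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EPT_MT_SHIFT 3
    #define MT_UC 0ULL                      /* uncacheable */
    #define MT_WB 6ULL                      /* write-back */

    static uint64_t get_mt_mask(uint64_t gfn, bool is_mmio)
    {
        uint64_t type = is_mmio ? MT_UC : MT_WB; /* device memory: never cache */
        (void)gfn;      /* a real policy would consult per-gfn state (MTRRs) */
        return type << EPT_MT_SHIFT;
    }

    int main(void)
    {
        printf("ram=%#lx mmio=%#lx\n",
               (unsigned long)get_mt_mask(0x1000, false),
               (unsigned long)get_mt_mask(0xfee00, true));
        return 0;
    }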
/arch/powerpc/kvm/
book3s.c
    288  pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
    294  unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
    304  return gfn_to_pfn(vcpu->kvm, gfn);
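kvmppc_gfn_to_pfn() checks whether the gfn is the paravirtual magic page (the KVM_PAM comparison at line 294) before falling back to the generic memslot-based gfn_to_pfn(). A sketch of that special-case-then-fallback shape; the magic gfn, its backing pfn, and the fallback are illustrative stand-ins.

    /* Sketch of kvmppc_gfn_to_pfn()'s shape: answer the magic page
     * directly, send everything else through the generic lookup. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAGIC_GFN 0x0fffULL             /* assumed gfn of the magic page */
    #define MAGIC_PFN 0x4242ULL             /* assumed host frame backing it */

    static uint64_t generic_gfn_to_pfn(uint64_t gfn)
    {
        return gfn + 0x100000;              /* stand-in for the memslot lookup */
    }

    static uint64_t gfn_to_pfn(uint64_t gfn)
    {
        if (gfn == MAGIC_GFN)               /* paravirt page: always resident */
            return MAGIC_PFN;
        return generic_gfn_to_pfn(gfn);
    }

    int main(void)
    {
        printf("magic=%#lx normal=%#lx\n",
               (unsigned long)gfn_to_pfn(MAGIC_GFN),
               (unsigned long)gfn_to_pfn(0x10));
        return 0;
    }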
44x_tlb.c
    301  gfn_t gfn;   [local]
    317  gfn = gpaddr >> PAGE_SHIFT;
    318  new_page = gfn_to_page(vcpu->kvm, gfn);
    320  printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
    321  (unsigned long long)gfn);
book3s_hv_rm_mmu.c
    75   unsigned long gfn, ptel, head;   [local]
    82   gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
    83   memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
    87   rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
    137  unsigned long i, pa, gpa, gfn, psize;   [local]
    164  gfn = gpa >> PAGE_SHIFT;
    165  memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
    182  slot_fn = gfn - memslot->base_gfn;
    200  hva = gfn_to_hva_memslot(memslot, gfn);
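The pattern running through these PowerPC hits is memslot-relative indexing: find the slot whose [base_gfn, base_gfn + npages) range covers the gfn, then index per-slot arrays such as rmap[] with gfn - base_gfn (lines 87 and 182). A self-contained sketch; the linear slot scan and the field names are simplifications of the real lookup.

    /* Sketch of memslot-relative indexing: locate the covering slot,
     * then use (gfn - base_gfn) into its per-frame arrays. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct memslot {
        uint64_t base_gfn;
        uint64_t npages;
        uint64_t *rmap;                 /* one reverse-map head per frame */
    };

    static struct memslot *gfn_to_memslot(struct memslot *s, size_t n,
                                          uint64_t gfn)
    {
        for (size_t i = 0; i < n; i++)
            if (gfn >= s[i].base_gfn && gfn < s[i].base_gfn + s[i].npages)
                return &s[i];
        return NULL;                    /* gfn not backed by guest RAM */
    }

    int main(void)
    {
        uint64_t rmap0[16] = { 0 }, rmap1[32] = { 0 };
        struct memslot slots[] = {
            { .base_gfn = 0x00, .npages = 16, .rmap = rmap0 },
            { .base_gfn = 0x80, .npages = 32, .rmap = rmap1 },
        };
        uint64_t gfn = 0x90;

        struct memslot *s = gfn_to_memslot(slots, 2, gfn);
        if (s)
            s->rmap[gfn - s->base_gfn] = 0xdead;    /* slot-relative index */
        printf("rmap1[0x10] = %#lx\n", (unsigned long)rmap1[0x10]);
        return 0;
    }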
book3s_pr.c
    250  static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)   [argument]
    255  unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
    259  return kvm_is_visible_gfn(vcpu->kvm, gfn);
booke.c
    527  gfn_t gfn;   [local]
    554  gfn = gpaddr >> PAGE_SHIFT;
    556  if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
    580  gfn_t gfn;   [local]
    598  gfn = gpaddr >> PAGE_SHIFT;
    600  if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
book3s_64_mmu_hv.c
    204  static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,   [argument]
    220  if (physp[gfn - memslot->base_gfn])
    228  start = gfn_to_hva_memslot(memslot, gfn);
    273  physp += (gfn - memslot->base_gfn) & ~(npages - 1);
    304  unsigned long psize, gpa, gfn;   [local]
    319  gfn = gpa >> PAGE_SHIFT;
    320  memslot = gfn_to_memslot(kvm, gfn);
    324  if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
    511  unsigned long gfn, hva, pfn;   [local]
    549  gfn
    697  kvm_handle_hva(struct kvm *kvm, unsigned long hva, int (*handler)(struct kvm *kvm, unsigned long *rmapp, unsigned long gfn))   [argument]
    724  kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long gfn)   [argument]
    791  kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long gfn)   [argument]
    847  kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, unsigned long gfn)   [argument]
    965  unsigned long gfn = gpa >> PAGE_SHIFT;   [local]
    [all...]
e500_tlb.c
    608  kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe, struct tlbe_ref *ref)   [argument]
    609  u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
    626  slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
    627  hva = gfn_to_hva_memslot(slot, gfn);
    654  slot_start = pfn - (gfn - slot->base_gfn);
    673  * requested) that will cover gfn, stay within the
    674  * range, and for which gfn and pfn are mutually
    682  gfn_start = gfn & ~(tsize_pages - 1);
    685  if (gfn_start + pfn - gfn < start)
    687  if (gfn_end + pfn - gfn > end)
    689  if ((gfn
    761  kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, struct kvm_book3e_206_tlb_entry *stlbe)   [argument]
    1074 gfn_t gfn = gpaddr >> PAGE_SHIFT;   [local]
    [all...]
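The comment at lines 673-674 states the constraint the e500 mapping code searches under: pick the largest TLB page size that covers gfn, stays inside the permitted [start, end) range, and keeps gfn and pfn mutually aligned (congruent modulo the page size, since one entry must map both). A worked sketch of that search; the bounds and maximum order are illustrative.

    /* Worked sketch of the size search: largest power-of-two block that
     * covers gfn, fits in [start, end), and keeps gfn/pfn congruent
     * modulo the block size. */
    #include <stdint.h>
    #include <stdio.h>

    static unsigned pick_order(uint64_t gfn, uint64_t pfn,
                               uint64_t start, uint64_t end, unsigned max_order)
    {
        for (unsigned order = max_order; order > 0; order--) {
            uint64_t pages = 1ULL << order;
            uint64_t gfn_start = gfn & ~(pages - 1);
            uint64_t gfn_end = gfn_start + pages;

            if ((gfn ^ pfn) & (pages - 1))
                continue;               /* gfn and pfn not mutually aligned */
            if (gfn_start < start || gfn_end > end)
                continue;               /* block would leave the slot/range */
            return order;
        }
        return 0;                       /* fall back to a single 4K page */
    }

    int main(void)
    {
        /* Slot covers gfns [0x100, 0x500); pfn = gfn + 0x1000 keeps all
         * bits below 0x1000 equal, so alignment never fails here. */
        unsigned order = pick_order(0x234, 0x1234, 0x100, 0x500, 8);
        printf("order=%u (%u pages)\n", order, 1u << order);
        return 0;
    }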
/arch/x86/include/asm/
kvm_host.h
    190  * bit 16 - direct mapping of virtual to physical mapping at gfn
    218  gfn_t gfn;   [member in struct:kvm_mmu_page]
    222  /* hold the gfn of each spte inside spt */
    660  u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
    686  gfn_t gfn;   [member in struct:kvm_arch_async_pf]
    715  int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
    725  u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
    798  gfn_t gfn, void *data, int offset, int len,
    812  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
    953  extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
    [all...]
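struct kvm_mmu_page stores one gfn for the table itself (line 218) and, per the comment at line 222, an array holding the gfn behind each spte; for direct-mapped pages, mmu.c line 670 (listed earlier) instead derives a spte's gfn arithmetically from its index and level. A sketch of that derivation, assuming 512-entry (9-bit) table levels; the struct is pared down to the two fields involved.

    /* Sketch of the direct-map derivation from mmu.c line 670: entry i
     * of a direct shadow page at level L maps frame
     * sp->gfn + (i << ((L - 1) * 9)), so no per-spte gfns[] is needed. */
    #include <stdint.h>
    #include <stdio.h>

    #define PT64_LEVEL_BITS 9

    struct mmu_page {
        uint64_t gfn;                   /* first gfn covered by this table */
        int level;                      /* 1 = 4K leaves, 2 = 2M per entry, ... */
    };

    static uint64_t page_get_gfn(const struct mmu_page *sp, int index)
    {
        return sp->gfn +
               ((uint64_t)index << ((sp->level - 1) * PT64_LEVEL_BITS));
    }

    int main(void)
    {
        struct mmu_page sp = { .gfn = 0x40000, .level = 2 };
        /* entry 3 of a level-2 table: 0x40000 + 3 * 512 = 0x40600 */
        printf("gfn = %#lx\n", (unsigned long)page_get_gfn(&sp, 3));
        return 0;
    }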