Lines Matching refs:access

22  * We need the mmu code to access both 32-bit and 64-bit guest ptes,
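The match at line 22 is from the file's header comment: the same walker has to handle both 32-bit and 64-bit guest ptes, which is why the code here is wrapped in the FNAME() macro and compiled once per guest PTE format (this is the paging-template pattern of including the file once per PTTYPE). A toy, user-space illustration of that "same code, two pte widths" idea follows; the macro and function names are invented for the sketch and are not kernel symbols.

/* Sketch of compiling one piece of pte-handling code for two pte widths.
 * Invented names; the kernel gets the same effect by including the
 * template file more than once with a different PTTYPE. */
#include <stdio.h>
#include <stdint.h>

#define DEFINE_PTE_PRESENT(name, type)                    \
        static int name(type pte) { return pte & 1; }

DEFINE_PTE_PRESENT(pte32_present, uint32_t)   /* 32-bit guest pte */
DEFINE_PTE_PRESENT(pte64_present, uint64_t)   /* 64-bit guest pte */

int main(void)
{
        printf("%d %d\n", pte32_present(0x1u), pte64_present(0x0ull));
        return 0;
}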
114 static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
125 /* Allow write access to dirty gptes */
128 *access &= mask;
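Lines 114-128 belong to FNAME(protect_clean_gpte): write permission is stripped from the computed access bits unless the gpte's dirty bit is already set, so the guest's first write traps and lets KVM set the dirty bit itself. A minimal user-space sketch of that masking follows; the bit positions mirror the usual x86 PTE layout (R/W = bit 1, Dirty = bit 6) and stand in for the kernel's PT_* / ACC_* macros rather than reproducing them.

/* Sketch of the "allow write access to dirty gptes" masking. */
#include <stdio.h>
#include <stdint.h>

#define ACC_WRITE_MASK    (1u << 1)  /* assumed to share the R/W bit position */
#define PT_WRITABLE_SHIFT 1
#define PT_DIRTY_SHIFT    6

static void protect_clean_gpte_sketch(unsigned *access, uint64_t gpte)
{
        unsigned mask = ~ACC_WRITE_MASK;

        /* Re-enable the write bit only if the gpte's dirty bit is set. */
        mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & ACC_WRITE_MASK;
        *access &= mask;
}

int main(void)
{
        unsigned access = ACC_WRITE_MASK | 1u;        /* write + read/present */
        uint64_t clean_gpte = 1u << 1;                /* writable, not dirty  */
        uint64_t dirty_gpte = (1u << 1) | (1u << 6);  /* writable and dirty   */

        protect_clean_gpte_sketch(&access, clean_gpte);
        printf("clean gpte -> access %#x (write stripped)\n", access);

        access = ACC_WRITE_MASK | 1u;
        protect_clean_gpte_sketch(&access, dirty_gpte);
        printf("dirty gpte -> access %#x (write kept)\n", access);
        return 0;
}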
194 unsigned access;
196 access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
200 access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
201 access &= ~(gpte >> PT64_NX_SHIFT);
204 return access;
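Lines 194-204 are FNAME(gpte_access), which folds the guest PTE's permission bits into KVM's ACC_* mask. In the non-EPT branch the R/W and U/S bits are taken over as-is (their positions match ACC_WRITE_MASK and ACC_USER_MASK) and execute permission is then cleared when the NX bit, bit 63, is set. A self-contained sketch of that branch, with the constants written out here as assumptions:

/* Sketch of the non-EPT half of gpte_access(). */
#include <stdio.h>
#include <stdint.h>

#define ACC_EXEC_MASK   1u
#define ACC_WRITE_MASK  (1u << 1)   /* same bit as PT_WRITABLE_MASK */
#define ACC_USER_MASK   (1u << 2)   /* same bit as PT_USER_MASK     */
#define PT64_NX_SHIFT   63

static unsigned gpte_access_sketch(uint64_t gpte)
{
        unsigned access;

        access = (gpte & (ACC_WRITE_MASK | ACC_USER_MASK)) | ACC_EXEC_MASK;
        /* gpte >> 63 leaves bit 0 set iff NX is set, clearing ACC_EXEC_MASK. */
        access &= ~(unsigned)(gpte >> PT64_NX_SHIFT);
        return access;
}

int main(void)
{
        uint64_t rw_user = 0x7;                  /* P | R/W | U/S    */
        uint64_t nx_rw   = 0x3 | (1ull << 63);   /* P | R/W, NX set  */

        printf("user rw -> %#x\n", gpte_access_sketch(rw_user)); /* 0x7 */
        printf("nx rw   -> %#x\n", gpte_access_sketch(nx_rw));   /* 0x2 */
        return 0;
}

With NX set only the write and user bits survive, which is what the `access &= ~(gpte >> PT64_NX_SHIFT)` line at 201 achieves.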
270 gva_t addr, u32 access)
279 const int write_fault = access & PFERR_WRITE_MASK;
280 const int user_fault = access & PFERR_USER_MASK;
281 const int fetch_fault = access & PFERR_FETCH_MASK;
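The access value handed to the walker at line 270 is a page-fault error code, and lines 279-281 split it into write/user/fetch intent. The sketch below performs the same decode; the PFERR_* values follow the x86 #PF error-code layout (write = bit 1, user = bit 2, fetch = bit 4) and are spelled out here only for illustration.

/* Sketch of decoding the walker's 'access' argument as lines 279-281 do. */
#include <stdio.h>

#define PFERR_WRITE_MASK  (1u << 1)
#define PFERR_USER_MASK   (1u << 2)
#define PFERR_FETCH_MASK  (1u << 4)

static void decode_access(unsigned access)
{
        const int write_fault = access & PFERR_WRITE_MASK;
        const int user_fault  = access & PFERR_USER_MASK;
        const int fetch_fault = access & PFERR_FETCH_MASK;

        printf("write=%d user=%d fetch=%d\n",
               !!write_fault, !!user_fault, !!fetch_fault);
}

int main(void)
{
        decode_access(PFERR_USER_MASK | PFERR_WRITE_MASK); /* user-mode write  */
        decode_access(PFERR_FETCH_MASK);                   /* instruction fetch */
        return 0;
}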
286 trace_kvm_mmu_pagetable_walk(addr, access);
330 * "guest page access" as the nested page fault's cause,
331 * instead of "guest page structure access". To fix this,
368 if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
379 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
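Line 368 then compares the access bits accumulated over the walked gptes against what the fault asked for, and line 379 pushes each resulting frame through mmu->translate_gpa() so that, for a nested guest, the address gets a second round of translation. The check below is a deliberately simplified stand-in for permission_fault() (the real helper also folds in CR0.WP, SMEP/SMAP and a precomputed permission bitmap); it only shows the "requested rights must be a subset of pte_access" idea.

/* Simplified stand-in for the permission check at line 368. */
#include <stdio.h>

#define ACC_EXEC_MASK     1u
#define ACC_WRITE_MASK    (1u << 1)
#define ACC_USER_MASK     (1u << 2)

#define PFERR_WRITE_MASK  (1u << 1)
#define PFERR_USER_MASK   (1u << 2)
#define PFERR_FETCH_MASK  (1u << 4)

static int permission_fault_sketch(unsigned pte_access, unsigned pferr)
{
        unsigned needed = 0;

        if (pferr & PFERR_WRITE_MASK)
                needed |= ACC_WRITE_MASK;
        if (pferr & PFERR_USER_MASK)
                needed |= ACC_USER_MASK;
        if (pferr & PFERR_FETCH_MASK)
                needed |= ACC_EXEC_MASK;

        return (pte_access & needed) != needed;  /* nonzero => fault */
}

int main(void)
{
        /* access accumulated over a walk: user + exec but not write */
        unsigned pte_access = ACC_USER_MASK | ACC_EXEC_MASK;

        printf("user read  -> fault=%d\n",
               permission_fault_sketch(pte_access, PFERR_USER_MASK));
        printf("user write -> fault=%d\n",
               permission_fault_sketch(pte_access,
                                       PFERR_USER_MASK | PFERR_WRITE_MASK));
        return 0;
}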
446 struct kvm_vcpu *vcpu, gva_t addr, u32 access)
449 access);
455 u32 access)
458 addr, access);
476 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
567 unsigned direct_access, access = gw->pt_access;
599 false, access, it.sptep);
690 * - write access through a shadow pte marked read only so that we can set
692 * - write access to a shadow pte marked read only so we can update the page
694 * - mmio access; in this case we will never install a present shadow pte
771 * we will cache the incorrect access into mmio spte.
878 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
885 r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);
898 u32 access,
905 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
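Lines 878-905 are the gva_to_gpa helpers: they run the walker with the caller's access and, when the walk succeeds, build the gpa from the walker's gfn plus the page offset of the original virtual address. The sketch below shows only that composition step; the gfn value is a stand-in for what a real walk would have produced.

/* Sketch of the gpa composition done after a successful walk:
 * shift the gfn back up and OR in the low page-offset bits of vaddr. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((uint64_t)(1 << PAGE_SHIFT) - 1))

static uint64_t gfn_to_gpa(uint64_t gfn)
{
        return gfn << PAGE_SHIFT;
}

int main(void)
{
        uint64_t vaddr = 0x7f3210abcull;
        uint64_t gfn   = 0x1234;  /* pretend the walker resolved vaddr to this gfn */

        uint64_t gpa = gfn_to_gpa(gfn) | (vaddr & ~PAGE_MASK);
        printf("vaddr %#llx -> gpa %#llx\n",
               (unsigned long long)vaddr, (unsigned long long)gpa);
        return 0;
}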
926 * used by guest then tlbs are not flushed, so guest is allowed to access the
962 pte_access = sp->role.access;