@@ -3456,27 +3456,27 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
 }
 
 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_FETCH_MASK;
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
 }
 
 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_WRITE_MASK;
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
 }
 
 /* uses this to access any guest's mapped memory without checking CPL */
 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
 {
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
@@ -3487,7 +3487,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
+		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
+							    error);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
@@ -3542,8 +3543,9 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
-						      PFERR_WRITE_MASK, error);
+		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+							    PFERR_WRITE_MASK,
+							    error);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
@@ -5663,6 +5665,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	kvm = vcpu->kvm;
 
 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
+	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
 	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 	vcpu->arch.mmu.translate_gpa = translate_gpa;
 	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
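
The change is mechanical but worth spelling out: every gva_to_gpa() call site now goes through the vcpu->arch.walk_mmu pointer rather than the embedded vcpu->arch.mmu context, and kvm_arch_vcpu_init() points walk_mmu back at &vcpu->arch.mmu, so behavior is unchanged until a later patch repoints it, e.g. at a separate context for walking a nested guest's page tables. Below is a minimal, self-contained C sketch of that indirection pattern; the type names and the identity translation are simplified stand-ins for illustration, not the kernel's actual definitions.

	/*
	 * Sketch of the walk_mmu indirection (simplified stand-in types,
	 * not the kernel's struct kvm_vcpu / struct kvm_mmu).
	 */
	#include <stdio.h>

	typedef unsigned long gva_t;
	typedef unsigned long gpa_t;

	struct mmu_ctx {
		/* translation hook; a real context would walk page tables */
		gpa_t (*gva_to_gpa)(gva_t gva);
	};

	struct vcpu {
		struct mmu_ctx mmu;	  /* context driving hardware paging */
		struct mmu_ctx *walk_mmu; /* context used to translate gvas */
	};

	/* placeholder walk: identity-map guest virtual to guest physical */
	static gpa_t identity_gva_to_gpa(gva_t gva)
	{
		return (gpa_t)gva;
	}

	static void vcpu_init(struct vcpu *vcpu)
	{
		vcpu->mmu.gva_to_gpa = identity_gva_to_gpa;
		/* mirrors the kvm_arch_vcpu_init() hunk: default to &mmu */
		vcpu->walk_mmu = &vcpu->mmu;
	}

	int main(void)
	{
		struct vcpu vcpu;

		vcpu_init(&vcpu);
		/* call sites use the pointer, as in the rewritten helpers */
		printf("gpa = 0x%lx\n", vcpu.walk_mmu->gva_to_gpa(0x1000));
		return 0;
	}

Because the call sites only ever dereference walk_mmu, swapping in a second context later means assigning one pointer in one place, with no churn in the translation helpers themselves.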