@@ -1998,7 +1998,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
-static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
@@ -2007,7 +2007,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
-
+	spin_lock(&kvm->mmu_lock);
 	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
@@ -2015,8 +2015,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 	}
 	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	spin_unlock(&kvm->mmu_lock);
+
 	return r;
 }
+EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
 
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
@@ -3698,9 +3701,8 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
 	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
-	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
-	spin_unlock(&vcpu->kvm->mmu_lock);
+
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
@@ -3721,10 +3723,18 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
+static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
+{
+	if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
+		return vcpu_match_mmio_gpa(vcpu, addr);
+
+	return vcpu_match_mmio_gva(vcpu, addr);
+}
+
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		       void *insn, int insn_len)
 {
-	int r;
+	int r, emulation_type = EMULTYPE_RETRY;
 	enum emulation_result er;
 
 	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
@@ -3736,7 +3746,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
 		goto out;
 	}
 
-	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
+	if (is_mmio_page_fault(vcpu, cr2))
+		emulation_type = 0;
+
+	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
 
 	switch (er) {
 	case EMULATE_DONE:
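
For readers tracing the control flow: the last two hunks make kvm_mmu_page_fault() ask the emulator to retry (EMULTYPE_RETRY) whenever the fault is not an MMIO access, since unprotecting the shadowed page can let the retried instruction succeed without emulation, whereas a fault on the cached MMIO region can never be fixed that way. Below is a minimal, self-contained userspace sketch of that decision, not kernel code: struct toy_vcpu, the match_mmio_* helpers, and the EMULTYPE_RETRY value are simplified stand-ins for vcpu->arch, vcpu_match_mmio_gpa()/vcpu_match_mmio_gva(), and the flag defined in arch/x86/include/asm/kvm_host.h.

/*
 * Toy model (userspace C, not kernel code) of the emulation-type
 * decision added to kvm_mmu_page_fault() above.  All names below are
 * simplified stand-ins, and the EMULTYPE_RETRY value is assumed here.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define EMULTYPE_RETRY	(1 << 3)	/* assumed value for this sketch */

struct toy_vcpu {
	bool	 direct_map;	/* stand-in for vcpu->arch.mmu.direct_map */
	bool	 nested;	/* stand-in for mmu_is_nested(vcpu) */
	uint64_t mmio_gva;	/* last cached MMIO guest virtual address */
	uint64_t mmio_gpa;	/* last cached MMIO guest physical address */
};

/* Page-granular matches, in the spirit of vcpu_match_mmio_gpa()/_gva(). */
static bool match_mmio_gpa(const struct toy_vcpu *v, uint64_t addr)
{
	return v->mmio_gpa && (v->mmio_gpa >> PAGE_SHIFT) == (addr >> PAGE_SHIFT);
}

static bool match_mmio_gva(const struct toy_vcpu *v, uint64_t addr)
{
	return v->mmio_gva && (v->mmio_gva >> PAGE_SHIFT) == (addr >> PAGE_SHIFT);
}

/*
 * Mirrors is_mmio_page_fault(): with a direct map (TDP) or a nested
 * MMU, the faulting address is a guest physical address; otherwise it
 * is a guest virtual address.
 */
static bool is_mmio_fault(const struct toy_vcpu *v, uint64_t cr2)
{
	if (v->direct_map || v->nested)
		return match_mmio_gpa(v, cr2);

	return match_mmio_gva(v, cr2);
}

static int pick_emulation_type(const struct toy_vcpu *v, uint64_t cr2)
{
	int emulation_type = EMULTYPE_RETRY;

	/* An MMIO fault can never be fixed by unprotecting a page. */
	if (is_mmio_fault(v, cr2))
		emulation_type = 0;

	return emulation_type;
}

int main(void)
{
	struct toy_vcpu v = { .direct_map = true, .mmio_gpa = 0xfee00000 };

	printf("MMIO fault   -> emulation_type %d\n",
	       pick_emulation_type(&v, 0xfee00040));
	printf("normal fault -> emulation_type %d\n",
	       pick_emulation_type(&v, 0x1000));
	return 0;
}

Running this prints emulation_type 0 for the address that falls in the cached MMIO page and 8 (the assumed retry flag) otherwise, mirroring the value the patched kvm_mmu_page_fault() hands to x86_emulate_instruction().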