@@ -5296,8 +5296,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	r = kvm_mmu_reload(vcpu);
 	if (unlikely(r)) {
-		kvm_x86_ops->cancel_injection(vcpu);
-		goto out;
+		goto cancel_injection;
 	}
 
 	preempt_disable();
@@ -5322,9 +5321,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		smp_wmb();
 		local_irq_enable();
 		preempt_enable();
-		kvm_x86_ops->cancel_injection(vcpu);
 		r = 1;
-		goto out;
+		goto cancel_injection;
 	}
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -5392,6 +5390,10 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	kvm_lapic_sync_from_vapic(vcpu);
 
 	r = kvm_x86_ops->handle_exit(vcpu);
+	return r;
+
+cancel_injection:
+	kvm_x86_ops->cancel_injection(vcpu);
 out:
 	return r;
 }
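
For readers following the control flow rather than the hunks: the sketch below is a self-contained stand-in, not kernel code. All types and helpers in it (struct vcpu, enter_guest(), mmu_fails, must_bail, and so on) are invented for illustration; only the cancel_injection/handle_exit naming and the goto consolidation come from the patch above. It shows the shape the patch gives vcpu_enter_guest(): both early bail-out paths funnel through a single cancel_injection label that undoes the pending event injection exactly once, while the real function keeps its out: label for earlier exit paths that never injected anything.

	/*
	 * Stand-alone sketch of the consolidated error path.  Everything
	 * here is a placeholder for the real KVM structures.
	 */
	#include <stdio.h>

	struct vcpu { int pending_event; };

	struct ops {
		void (*cancel_injection)(struct vcpu *v);
		int  (*handle_exit)(struct vcpu *v);
	};

	static void cancel_injection(struct vcpu *v)
	{
		v->pending_event = 0;	/* drop the event queued before entry */
		printf("injection cancelled\n");
	}

	static int handle_exit(struct vcpu *v)
	{
		(void)v;
		return 1;		/* 1 == caller keeps looping */
	}

	static struct ops x86_ops = { cancel_injection, handle_exit };

	/* mmu_fails / must_bail mimic the two early-exit conditions in the hunks. */
	static int enter_guest(struct vcpu *v, int mmu_fails, int must_bail)
	{
		int r;

		v->pending_event = 1;	/* an event was injected before this point */

		r = mmu_fails ? -1 : 0;	/* stands in for kvm_mmu_reload() */
		if (r)
			goto cancel_injection;

		if (must_bail) {	/* request/signal/reschedule pending */
			r = 1;
			goto cancel_injection;
		}

		/* ... the guest actually runs here ... */

		r = x86_ops.handle_exit(v);
		return r;		/* success path now returns directly */

	cancel_injection:
		x86_ops.cancel_injection(v);	/* single place that undoes injection */
		return r;
	}

	int main(void)
	{
		struct vcpu v;

		printf("mmu failure:   r=%d\n", enter_guest(&v, 1, 0));
		printf("late bail-out: r=%d\n", enter_guest(&v, 0, 1));
		printf("normal exit:   r=%d\n", enter_guest(&v, 0, 0));
		return 0;
	}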