@@ -5648,6 +5648,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
+	bool req_immediate_exit = 0;

 	if (vcpu->requests) {
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5687,7 +5688,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		record_steal_time(vcpu);
 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
 			process_nmi(vcpu);
-
+		req_immediate_exit =
+			kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
 	}

 	r = kvm_mmu_reload(vcpu);
@@ -5738,6 +5740,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)

 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

+	if (req_immediate_exit)
+		smp_send_reschedule(vcpu->cpu);
+
 	kvm_guest_enter();

 	if (unlikely(vcpu->arch.switch_db_regs)) {