|
@@ -5588,6 +5588,36 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
|
+/*
+ * kvm_pv_kick_cpu_op: Kick a vcpu out of HLT.
+ *
+ * @kvm    - the VM whose vcpu is to be kicked.
+ * @flags  - hypercall flags from the guest (currently unused).
+ * @apicid - apicid of the vcpu to be kicked.
+ */
+static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu *dest = NULL;
+	int i;
+
+	/*
+	 * Record the match in @dest rather than testing the iteration
+	 * cursor after the loop: kvm_for_each_vcpu() leaves the cursor
+	 * pointing at the last online vcpu when no APIC matches, so the
+	 * old "if (vcpu)" check could spuriously kick an unrelated vcpu
+	 * for a bogus guest-supplied apicid.
+	 */
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (!kvm_apic_present(vcpu))
+			continue;
+
+		if (kvm_apic_match_dest(vcpu, 0, 0, apicid, 0)) {
+			dest = vcpu;
+			break;
+		}
+	}
+	if (dest) {
+		/*
+		 * Setting the unhalt flag here can result in a spurious
+		 * runnable state when the flag is not consumed in
+		 * vcpu_block.  That is harmless since the vcpu should
+		 * simply halt again soon after.
+		 */
+		dest->arch.pv.pv_unhalted = true;
+		/* Make pv_unhalted visible to all before the vcpu unblocks. */
+		smp_wmb();
+		kvm_vcpu_kick(dest);
+	}
+}
+
|
|
|
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
unsigned long nr, a0, a1, a2, a3, ret;
|
|
@@ -5621,6 +5651,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
|
|
|
case KVM_HC_VAPIC_POLL_IRQ:
|
|
|
ret = 0;
|
|
|
break;
|
|
|
+ case KVM_HC_KICK_CPU:
|
|
|
+ kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
|
|
|
+ ret = 0;
|
|
|
+ break;
|
|
|
default:
|
|
|
ret = -KVM_ENOSYS;
|
|
|
break;
|
|
@@ -6043,6 +6077,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
|
|
|
kvm_apic_accept_events(vcpu);
|
|
|
switch(vcpu->arch.mp_state) {
|
|
|
case KVM_MP_STATE_HALTED:
|
|
|
+ vcpu->arch.pv.pv_unhalted = false;
|
|
|
vcpu->arch.mp_state =
|
|
|
KVM_MP_STATE_RUNNABLE;
|
|
|
case KVM_MP_STATE_RUNNABLE:
|
|
@@ -6342,7 +6377,12 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
|
|
|
struct kvm_mp_state *mp_state)
|
|
|
{
|
|
|
kvm_apic_accept_events(vcpu);
|
|
|
- mp_state->mp_state = vcpu->arch.mp_state;
|
|
|
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
|
|
|
+ vcpu->arch.pv.pv_unhalted)
|
|
|
+ mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
|
|
|
+ else
|
|
|
+ mp_state->mp_state = vcpu->arch.mp_state;
|
|
|
+
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -6863,6 +6903,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
|
|
|
BUG_ON(vcpu->kvm == NULL);
|
|
|
kvm = vcpu->kvm;
|
|
|
|
|
|
+ vcpu->arch.pv.pv_unhalted = false;
|
|
|
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
|
|
|
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
|
|
|
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
|
|
@@ -7200,6 +7241,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
|
|
|
!vcpu->arch.apf.halted)
|
|
|
|| !list_empty_careful(&vcpu->async_pf.done)
|
|
|
|| kvm_apic_has_events(vcpu)
|
|
|
+ || vcpu->arch.pv.pv_unhalted
|
|
|
|| atomic_read(&vcpu->arch.nmi_queued) ||
|
|
|
(kvm_arch_interrupt_allowed(vcpu) &&
|
|
|
kvm_cpu_has_interrupt(vcpu));
|