@@ -1262,6 +1262,34 @@ static const struct kvm_io_device_ops apic_mmio_ops = {
 	.write = apic_mmio_write,
 };
 
+static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
+{
+	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+	struct kvm_vcpu *vcpu = ktimer->vcpu;
+	wait_queue_head_t *q = &vcpu->wq;
+
+	/*
+	 * There is a race window between reading and incrementing, but we do
+	 * not care about potentially losing timer events in the !reinject
+	 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
+	 * in vcpu_enter_guest.
+	 */
+	if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
+		atomic_inc(&ktimer->pending);
+		/* FIXME: this code should not know anything about vcpus */
+		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+	}
+
+	if (waitqueue_active(q))
+		wake_up_interruptible(q);
+
+	if (ktimer->t_ops->is_periodic(ktimer)) {
+		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
+		return HRTIMER_RESTART;
+	} else
+		return HRTIMER_NORESTART;
+}
+
 int kvm_create_lapic(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic;
@@ -1285,7 +1313,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 
 	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
 		     HRTIMER_MODE_ABS);
-	apic->lapic_timer.timer.function = kvm_timer_fn;
+	apic->lapic_timer.timer.function = apic_timer_fn;
 	apic->lapic_timer.t_ops = &lapic_timer_ops;
 	apic->lapic_timer.kvm = vcpu->kvm;
 	apic->lapic_timer.vcpu = vcpu;
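
For reference, the hrtimer callback convention that apic_timer_fn follows -- recover the enclosing state with container_of(), then push the expiry forward with hrtimer_add_expires_ns() and return HRTIMER_RESTART to keep a periodic timer running -- can be exercised outside KVM. Below is a minimal kernel-module sketch of the same pattern; the module, its names, and the 100 ms period are illustrative only and are not part of this patch:

#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

/*
 * Illustrative wrapper, analogous to struct kvm_timer: the hrtimer is
 * embedded so the callback can recover the enclosing state via
 * container_of().
 */
struct demo_timer {
	struct hrtimer timer;
	u64 period_ns;		/* re-arm interval, like ktimer->period */
	bool periodic;		/* like ktimer->t_ops->is_periodic() */
};

static struct demo_timer demo;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *data)
{
	struct demo_timer *dt = container_of(data, struct demo_timer, timer);

	pr_info("demo timer fired\n");

	/*
	 * Same re-arm pattern as apic_timer_fn: advance the expiry by one
	 * period and ask the hrtimer core to restart the timer.
	 */
	if (dt->periodic) {
		hrtimer_add_expires_ns(&dt->timer, dt->period_ns);
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

static int __init demo_init(void)
{
	demo.period_ns = 100 * NSEC_PER_MSEC;	/* arbitrary demo period */
	demo.periodic = true;

	/* Same setup sequence the patch uses in kvm_create_lapic(). */
	hrtimer_init(&demo.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	demo.timer.function = demo_timer_fn;
	hrtimer_start(&demo.timer,
		      ktime_add_ns(ktime_get(), demo.period_ns),
		      HRTIMER_MODE_ABS);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo.timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");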
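
The race the new comment refers to sits between atomic_read() and atomic_inc(): when reinject is disabled, a tick arriving while a previous one is still pending is deliberately coalesced, and a tick that slips through the window can be lost. A hypothetical userspace C11 analogue of the producer/consumer handshake (all names made up here, not KVM code) shows why the window is benign:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for ktimer->pending and ktimer->reinject. */
static atomic_int pending;
static bool reinject;

/*
 * Producer side, mirroring the check in apic_timer_fn. Between the
 * atomic_load() and the atomic_fetch_add() a consumer may drain
 * pending; in that window a tick can be coalesced away when reinject
 * is false, which the patch's comment declares acceptable.
 */
static void timer_tick(void)
{
	if (reinject || atomic_load(&pending) == 0)
		atomic_fetch_add(&pending, 1);
}

/* Consumer side (sketch only): inject one interrupt per pending tick. */
static void inject_pending(void)
{
	while (atomic_load(&pending) > 0) {
		atomic_fetch_sub(&pending, 1);
		puts("inject timer interrupt");
	}
}

int main(void)
{
	reinject = false;
	timer_tick();		/* pending: 0 -> 1 */
	timer_tick();		/* pending already 1: tick coalesced */
	inject_pending();	/* injects exactly once */
	return 0;
}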