|
@@ -209,6 +209,16 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
|
|
|
clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
|
|
|
}
|
|
|
|
|
|
+static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
|
|
|
+}
|
|
|
+
|
|
|
+static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
|
|
|
+}
|
|
|
+
|
|
|
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
|
|
|
{
|
|
|
#ifdef CONFIG_KVM_BOOKE_HV
|
|
@@ -328,6 +338,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
|
|
|
msr_mask = MSR_CE | MSR_ME | MSR_DE;
|
|
|
int_class = INT_CLASS_NONCRIT;
|
|
|
break;
|
|
|
+ case BOOKE_IRQPRIO_WATCHDOG:
|
|
|
case BOOKE_IRQPRIO_CRITICAL:
|
|
|
case BOOKE_IRQPRIO_DBELL_CRIT:
|
|
|
allowed = vcpu->arch.shared->msr & MSR_CE;
|
|
@@ -407,12 +418,121 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
|
|
|
return allowed;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Return the number of jiffies until the next timeout. If the timeout is
|
|
|
+ * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
|
|
|
+ * because the larger value can break the timer APIs.
|
|
|
+ */
|
|
|
+static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ u64 tb, wdt_tb, wdt_ticks = 0;
|
|
|
+ u64 nr_jiffies = 0;
|
|
|
+ u32 period = TCR_GET_WP(vcpu->arch.tcr);
|
|
|
+
|
|
|
+ wdt_tb = 1ULL << (63 - period);
|
|
|
+ tb = get_tb();
|
|
|
+ /*
|
|
|
+	 * The watchdog timeout will happen when the TB bit corresponding
|
|
|
+	 * to the watchdog period toggles from 0 to 1.
|
|
|
+ */
|
|
|
+ if (tb & wdt_tb)
|
|
|
+ wdt_ticks = wdt_tb;
|
|
|
+
|
|
|
+ wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
|
|
|
+
|
|
|
+ /* Convert timebase ticks to jiffies */
|
|
|
+ nr_jiffies = wdt_ticks;
|
|
|
+
|
|
|
+ if (do_div(nr_jiffies, tb_ticks_per_jiffy))
|
|
|
+ nr_jiffies++;
|
|
|
+
|
|
|
+ return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
|
|
|
+}
|
|
|
+
|
|
|
+static void arm_next_watchdog(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ unsigned long nr_jiffies;
|
|
|
+ unsigned long flags;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If TSR_ENW and TSR_WIS are not set then no need to exit to
|
|
|
+ * userspace, so clear the KVM_REQ_WATCHDOG request.
|
|
|
+ */
|
|
|
+ if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
|
|
|
+ clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);
|
|
|
+
|
|
|
+ spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
|
|
|
+ nr_jiffies = watchdog_next_timeout(vcpu);
|
|
|
+ /*
|
|
|
+	 * If the watchdog timeout in jiffies is >= NEXT_TIMER_MAX_DELTA,
|
|
|
+	 * then do not run the watchdog timer, as this can break the timer APIs.
|
|
|
+ */
|
|
|
+ if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
|
|
|
+ mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
|
|
|
+ else
|
|
|
+ del_timer(&vcpu->arch.wdt_timer);
|
|
|
+ spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
|
|
|
+}
|
|
|
+
|
|
|
+void kvmppc_watchdog_func(unsigned long data)
|
|
|
+{
|
|
|
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
|
|
|
+ u32 tsr, new_tsr;
|
|
|
+ int final;
|
|
|
+
|
|
|
+ do {
|
|
|
+ new_tsr = tsr = vcpu->arch.tsr;
|
|
|
+ final = 0;
|
|
|
+
|
|
|
+ /* Time out event */
|
|
|
+ if (tsr & TSR_ENW) {
|
|
|
+ if (tsr & TSR_WIS)
|
|
|
+ final = 1;
|
|
|
+ else
|
|
|
+ new_tsr = tsr | TSR_WIS;
|
|
|
+ } else {
|
|
|
+ new_tsr = tsr | TSR_ENW;
|
|
|
+ }
|
|
|
+ } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);
|
|
|
+
|
|
|
+ if (new_tsr & TSR_WIS) {
|
|
|
+ smp_wmb();
|
|
|
+ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
|
|
|
+ kvm_vcpu_kick(vcpu);
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If this is final watchdog expiry and some action is required
|
|
|
+ * then exit to userspace.
|
|
|
+ */
|
|
|
+ if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
|
|
|
+ vcpu->arch.watchdog_enabled) {
|
|
|
+ smp_wmb();
|
|
|
+ kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
|
|
|
+ kvm_vcpu_kick(vcpu);
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Stop running the watchdog timer after final expiration to
|
|
|
+ * prevent the host from being flooded with timers if the
|
|
|
+ * guest sets a short period.
|
|
|
+ * Timers will resume when TSR/TCR is updated next time.
|
|
|
+ */
|
|
|
+ if (!final)
|
|
|
+ arm_next_watchdog(vcpu);
|
|
|
+}
|
|
|
+
|
|
|
static void update_timer_ints(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
|
|
|
kvmppc_core_queue_dec(vcpu);
|
|
|
else
|
|
|
kvmppc_core_dequeue_dec(vcpu);
|
|
|
+
|
|
|
+ if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
|
|
|
+ kvmppc_core_queue_watchdog(vcpu);
|
|
|
+ else
|
|
|
+ kvmppc_core_dequeue_watchdog(vcpu);
|
|
|
}
|
|
|
|
|
|
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
|
|
@@ -466,6 +586,11 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
|
|
|
kvmppc_core_flush_tlb(vcpu);
|
|
|
#endif
|
|
|
|
|
|
+ if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
|
|
|
+ vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
|
|
|
+ r = 0;
|
|
|
+ }
|
|
|
+
|
|
|
return r;
|
|
|
}
|
|
|
|
|
@@ -995,6 +1120,21 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
|
|
|
return r;
|
|
|
}
|
|
|
|
|
|
+int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ /* setup watchdog timer once */
|
|
|
+ spin_lock_init(&vcpu->arch.wdt_lock);
|
|
|
+ setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
|
|
|
+ (unsigned long)vcpu);
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ del_timer_sync(&vcpu->arch.wdt_timer);
|
|
|
+}
|
|
|
+
|
|
|
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
|
|
{
|
|
|
int i;
|
|
@@ -1090,7 +1230,13 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
|
|
|
}
|
|
|
|
|
|
if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
|
|
|
+ u32 old_tsr = vcpu->arch.tsr;
|
|
|
+
|
|
|
vcpu->arch.tsr = sregs->u.e.tsr;
|
|
|
+
|
|
|
+ if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
|
|
|
+ arm_next_watchdog(vcpu);
|
|
|
+
|
|
|
update_timer_ints(vcpu);
|
|
|
}
|
|
|
|
|
@@ -1251,6 +1397,7 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
|
|
|
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
|
|
|
{
|
|
|
vcpu->arch.tcr = new_tcr;
|
|
|
+ arm_next_watchdog(vcpu);
|
|
|
update_timer_ints(vcpu);
|
|
|
}
|
|
|
|
|
@@ -1265,6 +1412,14 @@ void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
|
|
|
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
|
|
|
{
|
|
|
clear_bits(tsr_bits, &vcpu->arch.tsr);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * We may have stopped the watchdog due to
|
|
|
+ * being stuck on final expiration.
|
|
|
+ */
|
|
|
+ if (tsr_bits & (TSR_ENW | TSR_WIS))
|
|
|
+ arm_next_watchdog(vcpu);
|
|
|
+
|
|
|
update_timer_ints(vcpu);
|
|
|
}
|
|
|
|