@@ -78,6 +78,37 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
+/*
+ * If we don't preset lpj, there is the possibility that the guest
+ * will calibrate under heavy load - thus getting a lower lpj -
+ * and then execute its delay loops without load. This is wrong,
+ * because a delay loop is never allowed to finish early.
+ * Any heuristic is bound to fail, because ultimately a large pool
+ * of guests can be running and disturbing each other. So we preset
+ * lpj here.
+ */
+static unsigned long kvm_get_tsc_khz(void)
+{
+	struct pvclock_vcpu_time_info *src;
+
+	src = &per_cpu(hv_clock, 0);
+	return pvclock_tsc_khz(src);
+}
+
+static void kvm_get_preset_lpj(void)
+{
+	struct pvclock_vcpu_time_info *src;
+	unsigned long khz;
+	u64 lpj;
+
+	src = &per_cpu(hv_clock, 0);
+	khz = pvclock_tsc_khz(src);
+
+	lpj = ((u64)khz * 1000);
+	do_div(lpj, HZ);
+	preset_lpj = lpj;
+}
+
 static struct clocksource kvm_clock = {
 	.name = "kvm-clock",
 	.read = kvm_clock_read,
@@ -153,6 +184,7 @@ void __init kvmclock_init(void)
 		pv_time_ops.get_wallclock = kvm_get_wallclock;
 		pv_time_ops.set_wallclock = kvm_set_wallclock;
 		pv_time_ops.sched_clock = kvm_clock_read;
+		pv_time_ops.get_tsc_khz = kvm_get_tsc_khz;
 #ifdef CONFIG_X86_LOCAL_APIC
 		pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
 #endif
@@ -163,6 +195,7 @@ void __init kvmclock_init(void)
 #ifdef CONFIG_KEXEC
 		machine_ops.crash_shutdown = kvm_crash_shutdown;
 #endif
+		kvm_get_preset_lpj();
 		clocksource_register(&kvm_clock);
 	}
 }
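
For reference, pvclock_tsc_khz() is not part of this patch: it lives in the
shared pvclock code and recovers the guest's TSC frequency from the scaling
fields the hypervisor publishes in struct pvclock_vcpu_time_info. Below is a
minimal userspace sketch of that inversion, assuming the usual pvclock
encoding (nanoseconds = ((cycles << tsc_shift) * tsc_to_system_mul) >> 32,
with a negative tsc_shift meaning a right shift of the cycle count); the
struct and function names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

struct pvclock_scale {
	uint32_t tsc_to_system_mul;	/* fixed-point cycles -> ns multiplier */
	int8_t tsc_shift;		/* pre-shift applied to the cycle count */
};

static unsigned long sketch_tsc_khz(const struct pvclock_scale *s)
{
	uint64_t khz = 1000000ULL << 32;	/* 10^6 scaled by 2^32 */

	khz /= s->tsc_to_system_mul;	/* undo the multiplier */
	if (s->tsc_shift < 0)		/* undo the pre-shift */
		khz <<= -s->tsc_shift;
	else
		khz >>= s->tsc_shift;
	return (unsigned long)khz;
}

int main(void)
{
	/* mul = 2^31, shift = 0 encodes "1 ns per 2 cycles", i.e. 2 GHz */
	struct pvclock_scale s = { 0x80000000u, 0 };

	printf("%lu kHz\n", sketch_tsc_khz(&s));	/* prints 2000000 */
	return 0;
}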
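
The khz -> lpj conversion in kvm_get_preset_lpj() is then plain arithmetic:
loops per jiffy is TSC cycles per second divided by HZ (the kernel uses
do_div() because raw 64-bit division is unavailable on 32-bit x86). A
standalone illustration, with the 2 GHz TSC and HZ=250 values assumed purely
for the example:

#include <stdint.h>
#include <stdio.h>

#define HZ 250	/* assumed guest tick rate */

int main(void)
{
	unsigned long khz = 2000000;		/* assumed 2 GHz TSC */
	uint64_t lpj = (uint64_t)khz * 1000;	/* cycles per second */

	lpj /= HZ;				/* cycles per jiffy */
	printf("preset_lpj = %llu\n", (unsigned long long)lpj);	/* 8000000 */
	return 0;
}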