@@ -55,6 +55,7 @@
 #include <asm/mce.h>
 #include <asm/i387.h>
 #include <asm/xcr.h>
+#include <asm/pvclock.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS \
@@ -976,14 +977,15 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
 	struct kvm_vcpu_arch *vcpu = &v->arch;
 	void *shared_kaddr;
 	unsigned long this_tsc_khz;
-	s64 kernel_ns;
+	s64 kernel_ns, max_kernel_ns;
+	u64 tsc_timestamp;
 
 	if ((!vcpu->time_page))
 		return 0;
 
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
-	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
+	kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
 	kernel_ns = get_kernel_ns();
 	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
 	local_irq_restore(flags);
@@ -993,13 +995,49 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
 		return 1;
 	}
 
+	/*
+	 * Time as measured by the TSC may go backwards when resetting the base
+	 * tsc_timestamp.  The reason for this is that the TSC resolution is
+	 * higher than the resolution of the other clock scales.  Thus, many
+	 * possible measurements of the TSC correspond to one measurement of any
+	 * other clock, and so a spread of values is possible.  This is not a
+	 * problem for the computation of the nanosecond clock; with TSC rates
+	 * around 1GHz, there can only be a few cycles which correspond to one
+	 * nanosecond value, and any path through this code will inevitably
+	 * take longer than that.  However, with the kernel_ns value itself,
+	 * the precision may be much lower, down to HZ granularity.  If the
+	 * first sampling of TSC against kernel_ns ends in the low part of the
+	 * range, and the second in the high end of the range, we can get:
+	 *
+	 * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
+	 *
+	 * As the sampling errors potentially range in the thousands of cycles,
+	 * it is possible such a time value has already been observed by the
+	 * guest.  To protect against this, we must compute the system time as
+	 * observed by the guest and ensure the new system time is greater.
+	 */
+	max_kernel_ns = 0;
+	if (vcpu->hv_clock.tsc_timestamp && vcpu->last_guest_tsc) {
+		max_kernel_ns = vcpu->last_guest_tsc -
+				vcpu->hv_clock.tsc_timestamp;
+		max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
+				    vcpu->hv_clock.tsc_to_system_mul,
+				    vcpu->hv_clock.tsc_shift);
+		max_kernel_ns += vcpu->last_kernel_ns;
+	}
+
 	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
 		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
 		vcpu->hw_tsc_khz = this_tsc_khz;
 	}
 
+	if (max_kernel_ns > kernel_ns)
+		kernel_ns = max_kernel_ns;
+
 	/* With all the info we got, fill in the values */
+	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
+	vcpu->last_kernel_ns = kernel_ns;
 	vcpu->hv_clock.flags = 0;
 
 	/*
@@ -4931,6 +4969,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (hw_breakpoint_active())
 		hw_breakpoint_restore();
 
+	kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
+
 	atomic_set(&vcpu->guest_mode, 0);
 	smp_wmb();
 	local_irq_enable();
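
To make the clamp concrete, here is a small user-space sketch (not part of the
patch) of the arithmetic kvm_write_guest_time() now performs.  scale_delta()
re-implements the semantics of pvclock_scale_delta() with a 128-bit multiply
for brevity, and every number below is made up for illustration; only the
shape of the computation is taken from the patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Convert a TSC delta to nanoseconds: pre-shift the delta, then apply
 * the 32.32 fixed-point multiplier.  This mirrors the semantics of
 * pvclock_scale_delta(); unsigned __int128 is a GCC/Clang extension.
 */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int8_t shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

int main(void)
{
	/* Hypothetical values standing in for the vcpu->arch fields. */
	uint64_t last_guest_tsc    = 5001000;    /* last TSC the guest could see */
	uint64_t old_tsc_timestamp = 5000000;    /* base of the previous hv_clock */
	int64_t  last_kernel_ns    = 1000000;    /* system_time published with it */
	uint32_t tsc_to_system_mul = 0x80000000; /* 0.5 in 32.32 fixed point */
	int8_t   tsc_shift         = 1;          /* with mul: 1 ns per cycle */

	int64_t kernel_ns = 999500;  /* fresh sample that landed "early" */

	/* Latest time the guest may already have computed from the old base... */
	int64_t max_kernel_ns = last_kernel_ns +
		(int64_t)scale_delta(last_guest_tsc - old_tsc_timestamp,
				     tsc_to_system_mul, tsc_shift);

	/* ...so never publish a system_time below it. */
	if (max_kernel_ns > kernel_ns)
		kernel_ns = max_kernel_ns;

	printf("published system_time = %lld ns\n", (long long)kernel_ns);
	return 0;
}

With these numbers the guest may already have observed 1000000 + 1000 =
1001000 ns, so the new 999500 ns sample is raised to 1001000 instead of
letting the guest's clock warp backwards by 1500 ns.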