@@ -920,31 +920,35 @@ static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
 	return quotient;
 }
 
-static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
+static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
+			       s8 *pshift, u32 *pmultiplier)
 {
-	uint64_t nsecs = 1000000000LL;
+	uint64_t scaled64;
 	int32_t shift = 0;
 	uint64_t tps64;
 	uint32_t tps32;
 
-	tps64 = tsc_khz * 1000LL;
-	while (tps64 > nsecs*2) {
+	tps64 = base_khz * 1000LL;
+	scaled64 = scaled_khz * 1000LL;
+	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000UL) {
 		tps64 >>= 1;
 		shift--;
 	}
 
 	tps32 = (uint32_t)tps64;
-	while (tps32 <= (uint32_t)nsecs) {
-		tps32 <<= 1;
+	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000UL) {
+		if (scaled64 & 0xffffffff00000000UL || tps32 & 0x80000000)
+			scaled64 >>= 1;
+		else
+			tps32 <<= 1;
 		shift++;
 	}
 
-	hv_clock->tsc_shift = shift;
-	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);
+	*pshift = shift;
+	*pmultiplier = div_frac(scaled64, tps32);
 
-	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
-		 __func__, tsc_khz, hv_clock->tsc_shift,
-		 hv_clock->tsc_to_system_mul);
+	pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
+		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
 }
 
 static inline u64 get_kernel_ns(void)
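The pair written through *pshift and *pmultiplier is a fixed-point rate. The two loops normalize the base rate so that scaled64 < tps32 <= 2*scaled64, which keeps the ratio in [0.5, 1) and lets div_frac() (which the surrounding code uses to form dividend * 2^32 / divisor) encode it as a 32-bit binary fraction without overflow. As a sketch of how a consumer would apply the pair, the hypothetical helper below mirrors the pvclock scaling scheme (shift first, then a 64x32-bit multiply keeping bits [95:32]); scale_ticks() is illustrative and not part of this patch:

#include <stdint.h>

/* Hypothetical consumer of a (shift, mul) pair from kvm_get_time_scale():
 * converts a tick count at base_khz into units of scaled_khz. */
static uint64_t scale_ticks(uint64_t ticks, int8_t shift, uint32_t mul)
{
	uint64_t lo, hi;

	/* Apply the binary shift first (negative means divide). */
	if (shift < 0)
		ticks >>= -shift;
	else
		ticks <<= shift;

	/* 64x32 -> 96-bit multiply keeping bits [95:32], i.e. ticks * mul / 2^32,
	 * split so the intermediate products cannot overflow 64 bits. */
	lo = ((ticks & 0xffffffffULL) * mul) >> 32;
	hi = (ticks >> 32) * mul;
	return hi + lo;
}

For a 2.5 GHz TSC scaled to nanoseconds this helper would be handed shift = -1 and mul = 0.8 * 2^32, so one second of ticks becomes (2500000000 >> 1) * 0.8 = 1000000000 ns.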
@@ -1084,7 +1088,9 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
 	}
 
 	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
-		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
+		kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
+				   &vcpu->hv_clock.tsc_shift,
+				   &vcpu->hv_clock.tsc_to_system_mul);
 		vcpu->hw_tsc_khz = this_tsc_khz;
 	}
 
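The call site now requests a conversion from the CPU's TSC rate to a nominal 1 GHz clock (NSEC_PER_SEC / 1000 = 1000000 kHz), so the guest-visible tsc_shift and tsc_to_system_mul keep their old meaning while the helper itself works for arbitrary scales. The arithmetic can be checked in isolation; the standalone harness below is a userspace re-statement of the patched helper (with div_frac() assumed to compute dividend * 2^32 / divisor), not kernel code:

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour of the kernel's div_frac(): dividend * 2^32 / divisor,
 * valid for dividend < divisor, which the normalization loops guarantee. */
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	return (uint32_t)(((uint64_t)dividend << 32) / divisor);
}

/* Userspace copy of the patched kvm_get_time_scale() logic. */
static void get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
			   int8_t *pshift, uint32_t *pmultiplier)
{
	uint64_t scaled64 = scaled_khz * 1000ULL;
	uint64_t tps64 = base_khz * 1000ULL;
	int32_t shift = 0;
	uint32_t tps32;

	/* Halve the base rate until it fits in 32 bits and is at most
	 * twice the target rate. */
	while (tps64 > scaled64 * 2 || tps64 & 0xffffffff00000000ULL) {
		tps64 >>= 1;
		shift--;
	}

	/* Bring the rates within a factor of two of each other. */
	tps32 = (uint32_t)tps64;
	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
			scaled64 >>= 1;
		else
			tps32 <<= 1;
		shift++;
	}

	*pshift = shift;
	*pmultiplier = div_frac((uint32_t)scaled64, tps32);
}

int main(void)
{
	int8_t shift;
	uint32_t mul;

	/* 2.5 GHz TSC scaled to a 1 GHz (nanosecond) clock. */
	get_time_scale(1000000, 2500000, &shift, &mul);
	printf("shift %d, mul %u\n", shift, mul);
	return 0;
}

For a 2500000 kHz TSC this prints "shift -1, mul 3435973836" (0.8 * 2^32, truncated), i.e. nanoseconds = TSC / 2.5, as expected.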