|
@@ -1326,6 +1326,8 @@ out:
|
|
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|
{
|
|
{
|
|
kvm_x86_ops->vcpu_load(vcpu, cpu);
|
|
kvm_x86_ops->vcpu_load(vcpu, cpu);
|
|
|
|
+ if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0))
|
|
|
|
+ per_cpu(cpu_tsc_khz, cpu) = cpufreq_quick_get(cpu);
|
|
kvm_request_guest_time_update(vcpu);
|
|
kvm_request_guest_time_update(vcpu);
|
|
}
|
|
}
|
|
|
|
|
|
@@ -3063,9 +3065,6 @@ static void bounce_off(void *info)
|
|
/* nothing */
|
|
/* nothing */
|
|
}
|
|
}
|
|
|
|
|
|
-static unsigned int ref_freq;
|
|
|
|
-static unsigned long tsc_khz_ref;
|
|
|
|
-
|
|
|
|
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
|
|
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
|
|
void *data)
|
|
void *data)
|
|
{
|
|
{
|
|
@@ -3074,14 +3073,11 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
|
|
struct kvm_vcpu *vcpu;
|
|
struct kvm_vcpu *vcpu;
|
|
int i, send_ipi = 0;
|
|
int i, send_ipi = 0;
|
|
|
|
|
|
- if (!ref_freq)
|
|
|
|
- ref_freq = freq->old;
|
|
|
|
-
|
|
|
|
if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
|
|
if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
|
|
return 0;
|
|
return 0;
|
|
if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
|
|
if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
|
|
return 0;
|
|
return 0;
|
|
- per_cpu(cpu_tsc_khz, freq->cpu) = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
|
|
|
|
|
|
+ per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
|
|
|
|
|
|
spin_lock(&kvm_lock);
|
|
spin_lock(&kvm_lock);
|
|
list_for_each_entry(kvm, &vm_list, vm_list) {
|
|
list_for_each_entry(kvm, &vm_list, vm_list) {
|
|
@@ -3122,12 +3118,14 @@ static void kvm_timer_init(void)
|
|
{
|
|
{
|
|
int cpu;
|
|
int cpu;
|
|
|
|
|
|
- for_each_possible_cpu(cpu)
|
|
|
|
- per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
|
|
|
|
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
|
|
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
|
|
- tsc_khz_ref = tsc_khz;
|
|
|
|
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
|
|
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
|
|
CPUFREQ_TRANSITION_NOTIFIER);
|
|
CPUFREQ_TRANSITION_NOTIFIER);
|
|
|
|
+ for_each_online_cpu(cpu)
|
|
|
|
+ per_cpu(cpu_tsc_khz, cpu) = cpufreq_get(cpu);
|
|
|
|
+ } else {
|
|
|
|
+ for_each_possible_cpu(cpu)
|
|
|
|
+ per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
@@ -4700,6 +4698,14 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
|
|
|
|
|
|
int kvm_arch_hardware_enable(void *garbage)
|
|
int kvm_arch_hardware_enable(void *garbage)
|
|
{
|
|
{
|
|
|
|
+ /*
|
|
|
|
+ * Since this may be called from a hotplug notification,
|
|
|
|
+ * we can't get the CPU frequency directly.
|
|
|
|
+ */
|
|
|
|
+ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
|
|
|
|
+ int cpu = raw_smp_processor_id();
|
|
|
|
+ per_cpu(cpu_tsc_khz, cpu) = 0;
|
|
|
|
+ }
|
|
return kvm_x86_ops->hardware_enable(garbage);
|
|
return kvm_x86_ops->hardware_enable(garbage);
|
|
}
|
|
}
|
|
|
|
|