@@ -808,12 +808,12 @@ EXPORT_SYMBOL_GPL(kvm_get_dr);
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	8
+#define KVM_SAVE_MSRS_BEGIN	9
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
+	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
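
The KVM_SAVE_MSRS_BEGIN bump from 8 to 9 matches the one MSR added to the kvm-specific block above, so userspace save/restore (and hence migration) picks up MSR_KVM_STEAL_TIME. For reference, the guest/host ABI this MSR points at is defined by the companion asm/kvm_para.h change in the same series; the sketch below is reconstructed from memory, so verify the exact values against that header:

#define MSR_KVM_STEAL_TIME	0x4b564d03
#define KVM_MSR_ENABLED		1

struct kvm_steal_time {
	__u64 steal;	/* ns the vcpu was runnable but not running */
	__u32 version;	/* update counter, kept even between updates */
	__u32 flags;
	__u32 pad[12];	/* pads the record to 64 bytes */
};

/* The low bit of the MSR value is the enable bit; bits 1..5 are
 * reserved, so the record's address must be 64-byte aligned. */
#define KVM_STEAL_ALIGNMENT_BITS	5
#define KVM_STEAL_VALID_BITS	((-1ULL << (KVM_STEAL_ALIGNMENT_BITS + 1)))
#define KVM_STEAL_RESERVED_MASK	(((1 << KVM_STEAL_ALIGNMENT_BITS) - 1) << 1)
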
@@ -1488,6 +1488,35 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
 	}
 }
 
+static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+{
+	u64 delta;
+
+	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+		return;
+
+	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
+	vcpu->arch.st.last_steal = current->sched_info.run_delay;
+	vcpu->arch.st.accum_steal = delta;
+}
+
+static void record_steal_time(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+		return;
+
+	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+		return;
+
+	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
+	vcpu->arch.st.steal.version += 2;
+	vcpu->arch.st.accum_steal = 0;
+
+	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
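
accumulate_steal_time() snapshots the delta of the scheduler's run_delay, and record_steal_time() folds it into the guest-visible record, bumping version by 2 so the count stays even; an odd or changing version tells a guest reader that an update raced with it. The consumer on the guest side (from the companion arch/x86/kernel/kvm.c patch in this series, sketched from memory) pairs with this protocol roughly as follows:

static u64 kvm_steal_clock(int cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
	u64 steal;
	int version;

	do {
		version = src->version;
		rmb();			/* read version before the payload */
		steal = src->steal;
		rmb();			/* read payload before the re-check */
	} while ((version & 1) || (version != src->version));

	return steal;
}
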
@@ -1570,6 +1599,33 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		if (kvm_pv_enable_async_pf(vcpu, data))
 			return 1;
 		break;
+	case MSR_KVM_STEAL_TIME:
+
+		if (unlikely(!sched_info_on()))
+			return 1;
+
+		if (data & KVM_STEAL_RESERVED_MASK)
+			return 1;
+
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+							data & KVM_STEAL_VALID_BITS))
+			return 1;
+
+		vcpu->arch.st.msr_val = data;
+
+		if (!(data & KVM_MSR_ENABLED))
+			break;
+
+		vcpu->arch.st.last_steal = current->sched_info.run_delay;
+
+		preempt_disable();
+		accumulate_steal_time(vcpu);
+		preempt_enable();
+
+		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+
+		break;
+
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
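
A wrmsr with any reserved bit set is refused (the return 1 surfaces as a #GP in the guest), and the gfn_to_hva cache is primed with only the address bits, which is why the record must be 64-byte aligned. On the guest side, registration is then just a wrmsr of the record's physical address with the enable bit set. A minimal sketch, assuming the per-cpu steal_time record from the earlier ABI sketch:

static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	memset(st, 0, sizeof(*st));
	/* __pa(st) has its low six bits clear thanks to __aligned(64). */
	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
}
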
@@ -1855,6 +1911,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_KVM_ASYNC_PF_EN:
 		data = vcpu->arch.apf.msr_val;
 		break;
+	case MSR_KVM_STEAL_TIME:
+		data = vcpu->arch.st.msr_val;
+		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
 	case MSR_IA32_MCG_CAP:
@@ -2166,6 +2225,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_migrate_timers(vcpu);
 		vcpu->cpu = cpu;
 	}
+
+	accumulate_steal_time(vcpu);
+	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
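
Hooking kvm_arch_vcpu_load() means the run_delay delta is sampled every time the vcpu is scheduled back onto a cpu, while the actual write to guest memory is deferred through a vcpu request so it happens in vcpu_enter_guest(), where touching guest pages is safe. The request bit itself comes from the kvm_host.h side of this patch; roughly the following, though the exact value is unverified here and only needs to be a free request bit:

#define KVM_REQ_STEAL_UPDATE	13
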
@@ -2487,6 +2549,10 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
 			     (1 << KVM_FEATURE_ASYNC_PF) |
 			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+
+		if (sched_info_on())
+			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
+
 		entry->ebx = 0;
 		entry->ecx = 0;
 		entry->edx = 0;
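
The feature is only advertised when sched_info_on() is true, since run_delay is only maintained with schedstats or task delay accounting enabled; the same check guards the wrmsr path above. A guest would gate its registration on this CPUID bit through the existing paravirt helper, e.g. using the hypothetical kvm_register_steal_time() sketched earlier:

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		kvm_register_steal_time();
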
@@ -5470,6 +5536,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			r = 1;
 			goto out;
 		}
+		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
+			record_steal_time(vcpu);
+
 	}
 
 	r = kvm_mmu_reload(vcpu);
@@ -6206,6 +6275,7 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	vcpu->arch.apf.msr_val = 0;
+	vcpu->arch.st.msr_val = 0;
 
 	kvmclock_reset(vcpu);
 