@@ -2096,6 +2096,8 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 			(nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
 			 vmcs12->tsc_offset : 0));
 	} else {
+		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+					   vmcs_read64(TSC_OFFSET), offset);
 		vmcs_write64(TSC_OFFSET, offset);
 	}
 }
@@ -2103,11 +2105,14 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 {
 	u64 offset = vmcs_read64(TSC_OFFSET);
+
 	vmcs_write64(TSC_OFFSET, offset + adjustment);
 	if (is_guest_mode(vcpu)) {
 		/* Even when running L2, the adjustment needs to apply to L1 */
 		to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
-	}
+	} else
+		trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
+					   offset + adjustment);
 }

 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)