@@ -66,7 +66,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 #define KVM_GUEST_CR0_MASK \
 	(KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
-	(X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
+	(X86_CR0_WP | X86_CR0_NE | X86_CR0_MP)
 #define KVM_VM_CR0_ALWAYS_ON \
 	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
 #define KVM_CR4_GUEST_OWNED_BITS \
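
X86_CR0_TS leaves the always-on set, so hardware CR0.TS is no longer forced
high for every guest. It is now raised only while the guest's FPU state is
not loaded; the vmx_set_cr0() hunk further down does exactly that:

	if (!vcpu->fpu_active)
		hw_cr0 |= X86_CR0_TS;
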
@@ -579,9 +579,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
 	u32 eb;
 
-	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
-	if (!vcpu->fpu_active)
-		eb |= 1u << NM_VECTOR;
+	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR)
+	     | (1u << NM_VECTOR);
 	/*
 	 * Unconditionally intercept #DB so we can maintain dr6 without
 	 * reading it every exit.
@@ -595,6 +594,8 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 		eb = ~0;
 	if (enable_ept)
 		eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
+	if (vcpu->fpu_active)
+		eb &= ~(1u << NM_VECTOR);
 	vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
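Taken together, the two update_exception_bitmap() hunks invert the #NM
(device-not-available) default: the vector is now intercepted unless the
guest's FPU is live, rather than masked unless it is inactive. Testing
fpu_active last also unmasks #NM on the real-mode path that resets eb to ~0,
where the old code kept intercepting it needlessly. The lazy-FPU round trip
this enables looks roughly like the following — a sketch assuming the
pre-existing #NM handling in handle_exception():

	if (is_no_device(intr_info)) {
		/* guest touched the FPU while CR0.TS was set */
		vmx_fpu_activate(vcpu);	/* loads FPU, clears TS, unmasks #NM */
		return 1;
	}
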
@@ -806,9 +807,6 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 
 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->fpu_active)
-		return;
-	vcpu->fpu_active = 0;
 	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
 }
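
vmx_fpu_deactivate() is reduced to a pure hardware primitive: raise TS in
GUEST_CR0 and re-arm the #NM intercept. The vcpu->fpu_active bookkeeping it
used to do evidently moves to common x86 code, which reaches back in through
the .fpu_deactivate hook registered at the end of this patch. A hypothetical
caller, for illustration only — note it must clear the flag *before*
invoking the hook, since update_exception_bitmap() reads it:

	if (vcpu->fpu_active) {
		vcpu->fpu_active = 0;	/* flag first, so the hook's */
		kvm_x86_ops->fpu_deactivate(vcpu); /* bitmap update re-traps #NM */
	}
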
@@ -1737,8 +1735,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	else
 		hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
 
-	vmx_fpu_deactivate(vcpu);
-
 	if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
 		enter_pmode(vcpu);
 
@@ -1757,12 +1753,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	if (enable_ept)
 		ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
 
+	if (!vcpu->fpu_active)
+		hw_cr0 |= X86_CR0_TS;
+
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0, hw_cr0);
 	vcpu->arch.cr0 = cr0;
-
-	if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
-		vmx_fpu_activate(vcpu);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
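
With the blanket vmx_fpu_deactivate()/vmx_fpu_activate() calls removed,
writing CR0 no longer thrashes FPU state as a side effect. The split is
clean: CR0_READ_SHADOW carries exactly what the guest wrote (TS is
host-owned in the CR0 guest/host mask, so guest reads of it come from the
shadow), while the TS bit the hardware acts on is derived solely from
fpu_active. A guest that clears TS while its FPU state is not loaded
therefore sees TS=0, yet its next FPU instruction still faults with #NM and
triggers lazy activation.
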
@@ -1793,8 +1789,6 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	vmx_flush_tlb(vcpu);
 	vmcs_writel(GUEST_CR3, guest_cr3);
-	if (kvm_read_cr0_bits(vcpu, X86_CR0_PE))
-		vmx_fpu_deactivate(vcpu);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
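
The CR3 hook's FPU handling goes too. The old code appears to have treated a
protected-mode CR3 load as a context-switch hint — the incoming task
probably does not own the FPU yet — and deactivated eagerly; with
deactivation now driven from common code, CR3 writes are back to being
purely about GUEST_CR3 and the TLB.
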
@@ -3002,11 +2996,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 		};
 		break;
 	case 2: /* clts */
-		vmx_fpu_deactivate(vcpu);
 		vcpu->arch.cr0 &= ~X86_CR0_TS;
 		vmcs_writel(CR0_READ_SHADOW, kvm_read_cr0(vcpu));
 		trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
-		vmx_fpu_activate(vcpu);
 		skip_emulated_instruction(vcpu);
 		return 1;
 	case 1: /*mov from cr*/
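
CLTS emulation likewise shrinks to CR0 bookkeeping: clear TS in
vcpu->arch.cr0 and refresh the read shadow. No deactivate/activate bracket
is needed; if the FPU really is inactive, GUEST_CR0.TS stays set underneath,
so the guest's next FPU instruction still takes the #NM exit and activation
happens there.
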
@@ -4127,6 +4119,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.cache_reg = vmx_cache_reg,
 	.get_rflags = vmx_get_rflags,
 	.set_rflags = vmx_set_rflags,
+	.fpu_deactivate = vmx_fpu_deactivate,
 
 	.tlb_flush = vmx_flush_tlb,
 
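
The new .fpu_deactivate entry is the channel through which common code asks
VMX to drop a vcpu's FPU without knowing any VMCS details. For this
initializer to compile, struct kvm_x86_ops must gain a matching member
elsewhere in the series, presumably:

	/* assumed companion change in arch/x86/include/asm/kvm_host.h: */
	void (*fpu_deactivate)(struct kvm_vcpu *vcpu);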