@@ -52,6 +52,7 @@
 #include <asm/desc.h>
 #include <asm/mtrr.h>
 #include <asm/mce.h>
+#include <asm/i387.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS \
@@ -5134,21 +5135,10 @@ void fx_init(struct kvm_vcpu *vcpu)
 {
 	unsigned after_mxcsr_mask;
 
-	/*
-	 * Touch the fpu the first time in non atomic context as if
-	 * this is the first fpu instruction the exception handler
-	 * will fire before the instruction returns and it'll have to
-	 * allocate ram with GFP_KERNEL.
-	 */
-	if (!used_math())
-		kvm_fx_save(&vcpu->arch.host_fx_image);
-
 	/* Initialize guest FPU by resetting ours and saving into guest's */
 	preempt_disable();
-	kvm_fx_save(&vcpu->arch.host_fx_image);
 	kvm_fx_finit();
 	kvm_fx_save(&vcpu->arch.guest_fx_image);
-	kvm_fx_restore(&vcpu->arch.host_fx_image);
 	preempt_enable();
 
 	vcpu->arch.cr0 |= X86_CR0_ET;
@@ -5165,7 +5155,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 		return;
 
 	vcpu->guest_fpu_loaded = 1;
-	kvm_fx_save(&vcpu->arch.host_fx_image);
+	unlazy_fpu(current);
 	kvm_fx_restore(&vcpu->arch.guest_fx_image);
 	trace_kvm_fpu(1);
 }
@@ -5177,7 +5167,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 
 	vcpu->guest_fpu_loaded = 0;
 	kvm_fx_save(&vcpu->arch.guest_fx_image);
-	kvm_fx_restore(&vcpu->arch.host_fx_image);
 	++vcpu->stat.fpu_reload;
 	set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
 	trace_kvm_fpu(0);
@@ -5203,9 +5192,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	/* We do fxsave: this must be aligned. */
-	BUG_ON((unsigned long)&vcpu->arch.host_fx_image & 0xF);
-
 	vcpu->arch.mtrr_state.have_fixed = 1;
 	vcpu_load(vcpu);
 	r = kvm_arch_vcpu_reset(vcpu);
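
For reference, a sketch of the two FPU switch paths as they read with this
patch applied, reconstructed from the hunks above. Only the statements shown
in the diff are verbatim; the opening guard conditions fall outside the hunk
context and are assumptions inferred from the visible return paths.

/*
 * Sketch, not part of the patch: kvm_load_guest_fpu()/kvm_put_guest_fpu()
 * after this change. The host FPU is no longer saved into and restored
 * from vcpu->arch.host_fx_image by hand; unlazy_fpu(current) (declared in
 * <asm/i387.h>, hence the new include) flushes the current task's FPU
 * state into its thread struct, so the kernel's ordinary FPU switching
 * restores it later.
 */
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_fpu_loaded)	/* guard assumed; outside hunk context */
		return;

	vcpu->guest_fpu_loaded = 1;
	unlazy_fpu(current);		/* hand host FPU state to the scheduler */
	kvm_fx_restore(&vcpu->arch.guest_fx_image);
	trace_kvm_fpu(1);
}

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (!vcpu->guest_fpu_loaded)	/* guard assumed; outside hunk context */
		return;

	vcpu->guest_fpu_loaded = 0;
	kvm_fx_save(&vcpu->arch.guest_fx_image);
	++vcpu->stat.fpu_reload;
	set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
	trace_kvm_fpu(0);
}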