@@ -1335,14 +1335,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	vcpu->arch.cr4 = cr4;
 }
 
-#ifdef CONFIG_X86_64
-
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
 	vcpu->arch.shadow_efer = efer;
+	if (!msr)
+		return;
 	if (efer & EFER_LMA) {
 		vmcs_write32(VM_ENTRY_CONTROLS,
 			     vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1359,8 +1359,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	setup_msrs(vmx);
 }
 
-#endif
-
 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
 	struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -1775,9 +1773,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	vmx->vcpu.arch.cr0 = 0x60000010;
 	vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
 	vmx_set_cr4(&vmx->vcpu, 0);
-#ifdef CONFIG_X86_64
 	vmx_set_efer(&vmx->vcpu, 0);
-#endif
 	vmx_fpu_activate(&vmx->vcpu);
 	update_exception_bitmap(&vmx->vcpu);
 
@@ -2668,9 +2664,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,
 	.set_cr4 = vmx_set_cr4,
-#ifdef CONFIG_X86_64
 	.set_efer = vmx_set_efer,
-#endif
 	.get_idt = vmx_get_idt,
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
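
For reference, below is a minimal standalone sketch of the control flow this change produces. It is illustrative only, not the kernel source: the struct layout, the find_msr_entry() stub, and the vm_entry_controls field stand in for the real VMCS accessors. The point is that without the CONFIG_X86_64 guard, vmx_set_efer() can now run on hosts where the guest MSR array has no EFER slot, so find_msr_entry() may return NULL; the function must still record shadow_efer but bail out before touching the VM-entry controls. That is what lets vmx_vcpu_reset() and the .set_efer hook call it unconditionally.

/*
 * Illustrative sketch only -- not the kernel code.  Types and helpers
 * are stubbed so the patched vmx_set_efer() flow compiles in isolation.
 */
#include <stdint.h>
#include <stdio.h>

#define EFER_LMA		(1ULL << 10)	/* long mode active */
#define VM_ENTRY_IA32E_MODE	(1U << 9)	/* VM-entry control bit */

struct kvm_msr_entry { uint32_t index; uint64_t data; };

struct vcpu_vmx {
	uint64_t shadow_efer;
	uint32_t vm_entry_controls;		/* stands in for the VMCS field */
	struct kvm_msr_entry *efer_slot;	/* NULL when EFER is absent */
};

/* Stub: NULL models a host with no EFER entry in the guest MSR array. */
static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx)
{
	return vmx->efer_slot;
}

static void vmx_set_efer(struct vcpu_vmx *vmx, uint64_t efer)
{
	struct kvm_msr_entry *msr = find_msr_entry(vmx);

	vmx->shadow_efer = efer;	/* always record the value */
	if (!msr)			/* no EFER slot: nothing else to do */
		return;

	if (efer & EFER_LMA)
		vmx->vm_entry_controls |= VM_ENTRY_IA32E_MODE;
	else
		vmx->vm_entry_controls &= ~VM_ENTRY_IA32E_MODE;
	msr->data = efer;
}

int main(void)
{
	struct kvm_msr_entry slot = { 0 };
	struct vcpu_vmx vmx32 = { 0 };			/* no EFER slot */
	struct vcpu_vmx vmx64 = { .efer_slot = &slot };

	vmx_set_efer(&vmx32, 0);		/* safe: early return */
	vmx_set_efer(&vmx64, EFER_LMA);		/* sets IA-32e entry control */
	printf("entry controls: %#x\n", vmx64.vm_entry_controls);
	return 0;
}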