@@ -1696,7 +1696,6 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
 	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
-	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
 	to_vmx(vcpu)->rflags = rflags;
 	if (to_vmx(vcpu)->rmode.vm86_active) {
 		to_vmx(vcpu)->rmode.save_rflags = rflags;
@@ -3110,7 +3109,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0, hw_cr0);
 	vcpu->arch.cr0 = cr0;
-	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
@@ -3220,8 +3218,10 @@ static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
 	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
 }
 
-static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
+static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
 	if (!is_protmode(vcpu))
 		return 0;
 
@@ -3229,13 +3229,6 @@ static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
 	    && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
 		return 3;
 
-	return vmx_read_guest_seg_selector(to_vmx(vcpu), VCPU_SREG_CS) & 3;
-}
-
-static int vmx_get_cpl(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
 	/*
 	 * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations
 	 * fail; use the cache instead.
@@ -3246,7 +3239,7 @@ static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 
 	if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
 		__set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
-		vmx->cpl = __vmx_get_cpl(vcpu);
+		vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
 	}
 
 	return vmx->cpl;
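
For reference, a sketch of how the consolidated vmx_get_cpl() reads once the hunks above are applied. It is assembled only from lines visible in the hunks; the two guard lines that fall outside the hunk context (the is_long_mode() half of the virtual-8086 test and the vm86_active fast path) are assumptions added for readability and are not part of this diff.

static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!is_protmode(vcpu))
		return 0;

	if (!is_long_mode(vcpu)				/* assumed: outside hunk context */
	    && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM))	/* if virtual 8086 */
		return 3;

	/*
	 * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations
	 * fail; use the cache instead.
	 */
	if (unlikely(vmx->rmode.vm86_active))		/* assumed: outside hunk context */
		return 0;

	/* CPL is computed lazily and cached in vmx->cpl until VCPU_EXREG_CPL is cleared. */
	if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
		vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
	}

	return vmx->cpl;
}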