|
@@ -618,6 +618,10 @@ static void kvm_cpu_vmxon(u64 addr);
|
|
|
static void kvm_cpu_vmxoff(void);
|
|
|
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
|
|
|
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
|
|
|
+static void vmx_set_segment(struct kvm_vcpu *vcpu,
|
|
|
+ struct kvm_segment *var, int seg);
|
|
|
+static void vmx_get_segment(struct kvm_vcpu *vcpu,
|
|
|
+ struct kvm_segment *var, int seg);
|
|
|
|
|
|
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
|
|
|
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
|
|
@@ -2782,6 +2786,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
|
|
|
{
|
|
|
unsigned long flags;
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
+ struct kvm_segment var;
|
|
|
|
|
|
if (enable_unrestricted_guest)
|
|
|
return;
|
|
@@ -2825,20 +2830,23 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
|
|
|
if (emulate_invalid_guest_state)
|
|
|
goto continue_rmode;
|
|
|
|
|
|
- vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
|
|
|
- vmcs_write32(GUEST_SS_LIMIT, 0xffff);
|
|
|
- vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
|
|
|
+ vmx_get_segment(vcpu, &var, VCPU_SREG_SS);
|
|
|
+ vmx_set_segment(vcpu, &var, VCPU_SREG_SS);
|
|
|
+
|
|
|
+ vmx_get_segment(vcpu, &var, VCPU_SREG_CS);
|
|
|
+ vmx_set_segment(vcpu, &var, VCPU_SREG_CS);
|
|
|
+
|
|
|
+ vmx_get_segment(vcpu, &var, VCPU_SREG_ES);
|
|
|
+ vmx_set_segment(vcpu, &var, VCPU_SREG_ES);
|
|
|
|
|
|
- vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
|
|
|
- vmcs_write32(GUEST_CS_LIMIT, 0xffff);
|
|
|
- if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
|
|
|
- vmcs_writel(GUEST_CS_BASE, 0xf0000);
|
|
|
- vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
|
|
|
+ vmx_get_segment(vcpu, &var, VCPU_SREG_DS);
|
|
|
+ vmx_set_segment(vcpu, &var, VCPU_SREG_DS);
|
|
|
|
|
|
- fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
|
|
|
- fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
|
|
|
- fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
|
|
|
- fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
|
|
|
+ vmx_get_segment(vcpu, &var, VCPU_SREG_GS);
|
|
|
+ vmx_set_segment(vcpu, &var, VCPU_SREG_GS);
|
|
|
+
|
|
|
+ vmx_get_segment(vcpu, &var, VCPU_SREG_FS);
|
|
|
+ vmx_set_segment(vcpu, &var, VCPU_SREG_FS);
|
|
|
|
|
|
continue_rmode:
|
|
|
kvm_mmu_reset_context(vcpu);
|
|
@@ -3243,6 +3251,44 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
|
|
|
|
|
|
vmcs_write32(sf->ar_bytes, ar);
|
|
|
__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Fix segments for real-mode guests on hosts that don't have
|
|
|
+ * "unrestricted guest" mode or have it disabled.
|
|
|
+ * This is done to allow migration of guests from hosts with
|
|
|
+ * unrestricted guest like Westmere to older hosts that don't have
|
|
|
+ * unrestricted guest like Nehalem.
|
|
|
+ */
|
|
|
+ if (!enable_unrestricted_guest && vmx->rmode.vm86_active) {
|
|
|
+ switch (seg) {
|
|
|
+ case VCPU_SREG_CS:
|
|
|
+ vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
|
|
|
+ vmcs_write32(GUEST_CS_LIMIT, 0xffff);
|
|
|
+ if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
|
|
|
+ vmcs_writel(GUEST_CS_BASE, 0xf0000);
|
|
|
+ vmcs_write16(GUEST_CS_SELECTOR,
|
|
|
+ vmcs_readl(GUEST_CS_BASE) >> 4);
|
|
|
+ break;
|
|
|
+ case VCPU_SREG_ES:
|
|
|
+ fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
|
|
|
+ break;
|
|
|
+ case VCPU_SREG_DS:
|
|
|
+ fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
|
|
|
+ break;
|
|
|
+ case VCPU_SREG_GS:
|
|
|
+ fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
|
|
|
+ break;
|
|
|
+ case VCPU_SREG_FS:
|
|
|
+ fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
|
|
|
+ break;
|
|
|
+ case VCPU_SREG_SS:
|
|
|
+ vmcs_write16(GUEST_SS_SELECTOR,
|
|
|
+ vmcs_readl(GUEST_SS_BASE) >> 4);
|
|
|
+ vmcs_write32(GUEST_SS_LIMIT, 0xffff);
|
|
|
+ vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
|