@@ -3184,6 +3184,10 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 	kvm_desct->base |= seg_desc->base2 << 24;
 	kvm_desct->limit = seg_desc->limit0;
 	kvm_desct->limit |= seg_desc->limit << 16;
+	if (seg_desc->g) {
+		kvm_desct->limit <<= 12;
+		kvm_desct->limit |= 0xfff;
+	}
 	kvm_desct->selector = selector;
 	kvm_desct->type = seg_desc->type;
 	kvm_desct->present = seg_desc->p;
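
With the G (granularity) bit set, a descriptor's 20-bit limit counts 4 KiB
pages rather than bytes, so the effective byte limit is (limit << 12) | 0xfff,
which is exactly what the hunk above computes. A minimal standalone sketch of
the same expansion (hypothetical helper name, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Expand a 20-bit segment limit to a byte-granular limit, as the CPU
 * does when the descriptor's G bit is set. Illustration only. */
static uint32_t effective_limit(uint32_t raw_limit, int g_bit)
{
	if (g_bit)
		return (raw_limit << 12) | 0xfff; /* 4 KiB units -> bytes */
	return raw_limit;                         /* already in bytes */
}

int main(void)
{
	/* 0xfffff pages of 4 KiB cover the full 4 GiB address space. */
	printf("g=0: %#x\n", effective_limit(0xfffff, 0)); /* 0xfffff */
	printf("g=1: %#x\n", effective_limit(0xfffff, 1)); /* 0xffffffff */
	return 0;
}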
@@ -3223,6 +3227,7 @@ static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
+	gpa_t gpa;
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
@@ -3232,13 +3237,16 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
 		return 1;
 	}
-	return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+	gpa += index * 8;
+	return kvm_read_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 /* allowed just for 8 bytes segments */
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 					 struct desc_struct *seg_desc)
 {
+	gpa_t gpa;
 	struct descriptor_table dtable;
 	u16 index = selector >> 3;
 
@@ -3246,7 +3254,9 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
 	if (dtable.limit < index * 8 + 7)
 		return 1;
-	return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, dtable.base);
+	gpa += index * 8;
+	return kvm_write_guest(vcpu->kvm, gpa, seg_desc, 8);
 }
 
 static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
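
Both helpers find the 8-byte descriptor by stripping the selector's RPL and TI
bits (index = selector >> 3) and verify that the whole descriptor fits below
dtable.limit before touching it; the new code additionally translates
dtable.base from a guest-virtual to a guest-physical address, since
kvm_read_guest()/kvm_write_guest() operate on guest-physical memory. The
offset arithmetic, as a self-contained sketch (illustrative names only):

#include <stdint.h>
#include <stdio.h>

/* Compute the byte offset of a segment descriptor inside a descriptor
 * table and validate it against the table limit, mirroring the checks
 * in load/save_guest_segment_descriptor. Illustration only. */
static int descriptor_offset(uint16_t selector, uint32_t table_limit,
			     uint32_t *offset)
{
	uint16_t index = selector >> 3; /* drop RPL (bits 0-1), TI (bit 2) */

	if (table_limit < index * 8u + 7u) /* descriptor must fit entirely */
		return -1;
	*offset = index * 8u; /* descriptors are 8 bytes each */
	return 0;
}

int main(void)
{
	uint32_t off;

	/* Selector 0x10 is index 2, RPL 0: offset 16 in a table of limit 0x17. */
	if (descriptor_offset(0x10, 0x17, &off) == 0)
		printf("offset %#x\n", off); /* prints 0x10 */
	return 0;
}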
@@ -3258,55 +3268,7 @@ static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
 	base_addr |= (seg_desc->base1 << 16);
 	base_addr |= (seg_desc->base2 << 24);
 
-	return base_addr;
-}
-
-static int load_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_32));
-}
-
-static int save_tss_segment32(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_32 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_32));
-}
-
-static int load_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_read_guest(vcpu->kvm, base_addr, tss,
-			      sizeof(struct tss_segment_16));
-}
-
-static int save_tss_segment16(struct kvm_vcpu *vcpu,
-			      struct desc_struct *seg_desc,
-			      struct tss_segment_16 *tss)
-{
-	u32 base_addr;
-
-	base_addr = get_tss_base_addr(vcpu, seg_desc);
-
-	return kvm_write_guest(vcpu->kvm, base_addr, tss,
-			       sizeof(struct tss_segment_16));
+	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
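
get_tss_base_addr() assembles the 32-bit TSS base from the three scattered
base fields of the descriptor and, after this change, returns it already
translated to a guest-physical address; that is what lets the four dedicated
load/save_tss_segment{16,32} wrappers above be deleted in favour of direct
kvm_read_guest()/kvm_write_guest() calls. The field assembly, sketched
standalone (a toy struct mimicking desc_struct's base0/base1/base2 split):

#include <stdint.h>
#include <stdio.h>

/* x86 descriptors scatter the 32-bit base across three fields. */
struct demo_desc {
	uint16_t base0; /* base bits 0-15 */
	uint8_t  base1; /* base bits 16-23 */
	uint8_t  base2; /* base bits 24-31 */
};

static uint32_t demo_base(const struct demo_desc *d)
{
	uint32_t base = d->base0;

	base |= (uint32_t)d->base1 << 16;
	base |= (uint32_t)d->base2 << 24;
	return base;
}

int main(void)
{
	struct demo_desc d = { .base0 = 0x3000, .base1 = 0x02, .base2 = 0xc0 };

	printf("%#x\n", demo_base(&d)); /* prints 0xc0023000 */
	return 0;
}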
@@ -3466,20 +3428,26 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
 }
 
 static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      struct desc_struct *cseg_desc,
+			      u32 old_tss_base,
 			      struct desc_struct *nseg_desc)
 {
 	struct tss_segment_16 tss_segment_16;
 	int ret = 0;
 
-	if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			   sizeof tss_segment_16))
 		goto out;
 
 	save_state_to_tss16(vcpu, &tss_segment_16);
-	save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
 
-	if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_16,
+			    sizeof tss_segment_16))
 		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_16, sizeof tss_segment_16))
+		goto out;
+
 	if (load_state_from_tss16(vcpu, &tss_segment_16))
 		goto out;
 
@@ -3489,20 +3457,26 @@ out:
 }
 
 static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
-			      struct desc_struct *cseg_desc,
+			      u32 old_tss_base,
 			      struct desc_struct *nseg_desc)
 {
 	struct tss_segment_32 tss_segment_32;
 	int ret = 0;
 
-	if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+	if (kvm_read_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			   sizeof tss_segment_32))
 		goto out;
 
 	save_state_to_tss32(vcpu, &tss_segment_32);
-	save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
 
-	if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+	if (kvm_write_guest(vcpu->kvm, old_tss_base, &tss_segment_32,
+			    sizeof tss_segment_32))
 		goto out;
+
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+			   &tss_segment_32, sizeof tss_segment_32))
+		goto out;
+
 	if (load_state_from_tss32(vcpu, &tss_segment_32))
 		goto out;
 
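
Both kvm_task_switch_16() and kvm_task_switch_32() now follow the same
read-modify-write order against guest memory: read the outgoing TSS, fold the
current register state into it, write it back, then read the incoming TSS and
load state from it. A toy simulation of that ordering against an in-memory
"guest" (stand-in types and helpers, not the KVM API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct toy_tss { uint32_t ip, sp; }; /* stand-in for tss_segment_16/32 */
static uint8_t guest_ram[4096];      /* stand-in for guest physical memory */

static void read_guest(uint32_t gpa, void *buf, size_t len)
{
	memcpy(buf, guest_ram + gpa, len);
}

static void write_guest(uint32_t gpa, const void *buf, size_t len)
{
	memcpy(guest_ram + gpa, buf, len);
}

int main(void)
{
	uint32_t old_tss_base = 0x100, new_tss_base = 0x200;
	struct toy_tss tss;

	read_guest(old_tss_base, &tss, sizeof tss);  /* 1. read outgoing TSS */
	tss.ip = 0x1234;                             /* 2. save current state */
	tss.sp = 0xfff0;
	write_guest(old_tss_base, &tss, sizeof tss); /* 3. write it back */
	read_guest(new_tss_base, &tss, sizeof tss);  /* 4. read incoming TSS */
	printf("resume at ip=%#x sp=%#x\n", tss.ip, tss.sp);
	return 0;
}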
@@ -3517,16 +3491,20 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	struct desc_struct cseg_desc;
 	struct desc_struct nseg_desc;
 	int ret = 0;
+	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
+	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
 
-	kvm_get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
 
+	/* FIXME: Handle errors. Failure to read either TSS or their
+	 * descriptors should generate a pagefault.
+	 */
 	if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
 		goto out;
 
-	if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+	if (load_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc))
 		goto out;
 
-
 	if (reason != TASK_SWITCH_IRET) {
 		int cpl;
 
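
The FIXME above is worth taking seriously: vcpu->arch.mmu.gva_to_gpa() can
fail when the TSS base is not mapped, and nothing here checks the result. A
hedged sketch of what such a check could look like, assuming KVM's
UNMAPPED_GVA sentinel for failed translations (the exception injection is
elided; this is not the patch's code):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;
#define UNMAPPED_GVA (~(gpa_t)0) /* sentinel for a failed GVA->GPA walk */

/* Reject an untranslated TSS base; real code would inject a #PF. */
static int check_tss_base(gpa_t translated, gpa_t *old_tss_base)
{
	if (translated == UNMAPPED_GVA)
		return -1;
	*old_tss_base = translated;
	return 0;
}

int main(void)
{
	gpa_t base;

	if (check_tss_base(UNMAPPED_GVA, &base))
		printf("translation failed, would queue a page fault\n");
	return 0;
}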
@@ -3544,8 +3522,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 
 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
 		cseg_desc.type &= ~(1 << 1); //clear the B flag
-		save_guest_segment_descriptor(vcpu, tr_seg.selector,
-					      &cseg_desc);
+		save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
 	}
 
 	if (reason == TASK_SWITCH_IRET) {
@@ -3557,10 +3534,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	kvm_x86_ops->cache_regs(vcpu);
 
 	if (nseg_desc.type & 8)
-		ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+		ret = kvm_task_switch_32(vcpu, tss_selector, old_tss_base,
 					 &nseg_desc);
 	else
-		ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+		ret = kvm_task_switch_16(vcpu, tss_selector, old_tss_base,
 					 &nseg_desc);
 
 	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {