|
@@ -803,7 +803,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 	 */
 	vmx->host_state.ldt_sel = kvm_read_ldt();
 	vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
-	vmx->host_state.fs_sel = kvm_read_fs();
+	savesegment(fs, vmx->host_state.fs_sel);
 	if (!(vmx->host_state.fs_sel & 7)) {
 		vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
 		vmx->host_state.fs_reload_needed = 0;
@@ -811,7 +811,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 		vmcs_write16(HOST_FS_SELECTOR, 0);
 		vmx->host_state.fs_reload_needed = 1;
 	}
-	vmx->host_state.gs_sel = kvm_read_gs();
+	savesegment(gs, vmx->host_state.gs_sel);
 	if (!(vmx->host_state.gs_sel & 7))
 		vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
 	else {
@@ -841,27 +841,21 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 {
-	unsigned long flags;
-
 	if (!vmx->host_state.loaded)
 		return;
 
 	++vmx->vcpu.stat.host_state_reload;
 	vmx->host_state.loaded = 0;
 	if (vmx->host_state.fs_reload_needed)
-		kvm_load_fs(vmx->host_state.fs_sel);
+		loadsegment(fs, vmx->host_state.fs_sel);
 	if (vmx->host_state.gs_ldt_reload_needed) {
 		kvm_load_ldt(vmx->host_state.ldt_sel);
-		/*
-		 * If we have to reload gs, we must take care to
-		 * preserve our gs base.
-		 */
-		local_irq_save(flags);
-		kvm_load_gs(vmx->host_state.gs_sel);
 #ifdef CONFIG_X86_64
-		wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+		load_gs_index(vmx->host_state.gs_sel);
+		wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+		loadsegment(gs, vmx->host_state.gs_sel);
 #endif
-		local_irq_restore(flags);
 	}
 	reload_tss();
 #ifdef CONFIG_X86_64
@@ -2589,8 +2583,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 	vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 	vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
-	vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
-	vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
+	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
+	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
 	vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
 #ifdef CONFIG_X86_64
 	rdmsrl(MSR_FS_BASE, a);