@@ -176,6 +176,8 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
 
 static int init_rmode(struct kvm *kvm);
 static u64 construct_eptp(unsigned long root_hpa);
+static void kvm_cpu_vmxon(u64 addr);
+static void kvm_cpu_vmxoff(void);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
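
Note: the forward declarations exist because vmx_vcpu_load()/vmx_vcpu_put() appear earlier in vmx.c than the VMXON/VMXOFF wrappers they now call. For orientation, the wrappers defined lower in the file look roughly like this in the vmx.c of this era (a sketch for context only, not part of this patch):

	static void kvm_cpu_vmxon(u64 addr)
	{
		/* Enter VMX root operation; addr is the physical address
		 * of this cpu's VMXON region. */
		asm volatile (ASM_VMX_VMXON_RAX
				: : "a"(&addr), "m"(addr)
				: "memory", "cc");
	}

	static void kvm_cpu_vmxoff(void)
	{
		/* Leave VMX root operation. */
		asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
	}
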
@@ -847,8 +849,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 tsc_this, delta, new_offset;
+	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
 
-	if (vmm_exclusive && vcpu->cpu != cpu)
+	if (!vmm_exclusive)
+		kvm_cpu_vmxon(phys_addr);
+	else if (vcpu->cpu != cpu)
 		vcpu_clear(vmx);
 
 	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
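
Note: with vmm_exclusive clear, the CPU may be outside VMX operation whenever no guest is running, so vcpu load executes VMXON before any VMCS is touched; only in exclusive mode does the cross-cpu vcpu_clear() path still apply. The vmm_exclusive knob is declared in a hunk outside this excerpt, presumably along these lines (assumed shape, shown for context):

	/* Assumed declaration; the hunk adding it is not shown here. */
	static int __read_mostly vmm_exclusive = 1;
	module_param(vmm_exclusive, bool, S_IRUGO);
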
@@ -894,8 +899,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	__vmx_load_host_state(to_vmx(vcpu));
-	if (!vmm_exclusive)
+	if (!vmm_exclusive) {
 		__vcpu_clear(to_vmx(vcpu));
+		kvm_cpu_vmxoff();
+	}
 }
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
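
Note: the put path mirrors the load path; when KVM does not own VMX exclusively, the vcpu's VMCS is cleared and VMXOFF is executed as soon as the vcpu is scheduled out, leaving the CPU outside VMX operation for a coexisting VMM.
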
@@ -1327,9 +1334,11 @@ static int hardware_enable(void *garbage)
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
 	}
 	write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
-	kvm_cpu_vmxon(phys_addr);
 
-	ept_sync_global();
+	if (vmm_exclusive) {
+		kvm_cpu_vmxon(phys_addr);
+		ept_sync_global();
+	}
 
 	return 0;
 }
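
Note: CR4.VMXE is still set unconditionally at hardware-enable time, but VMXON, and the global EPT sync that can only run in VMX operation, now happen here only in exclusive mode; in shared mode VMXON is deferred to vcpu load (see the vmx_vcpu_load() hunk above).
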
@@ -1355,8 +1364,10 @@ static void kvm_cpu_vmxoff(void)
 
 static void hardware_disable(void *garbage)
 {
-	vmclear_local_vcpus();
-	kvm_cpu_vmxoff();
+	if (vmm_exclusive) {
+		vmclear_local_vcpus();
+		kvm_cpu_vmxoff();
+	}
 	write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
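
Note: teardown is symmetric; per-cpu VMCSes need clearing and VMXOFF is needed only if KVM held VMX for its whole lifetime, while clearing CR4.VMXE remains unconditional.
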
@@ -3991,6 +4002,19 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	kmem_cache_free(kvm_vcpu_cache, vmx);
 }
 
+static inline void vmcs_init(struct vmcs *vmcs)
+{
+	u64 phys_addr = __pa(per_cpu(vmxarea, raw_smp_processor_id()));
+
+	if (!vmm_exclusive)
+		kvm_cpu_vmxon(phys_addr);
+
+	vmcs_clear(vmcs);
+
+	if (!vmm_exclusive)
+		kvm_cpu_vmxoff();
+}
+
 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 {
 	int err;
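
Note: VMCLEAR is itself a VMX instruction and faults if executed outside VMX operation, so the new vmcs_init() helper brackets the initial vmcs_clear() with VMXON/VMXOFF in the non-exclusive case. The final hunk below switches vmx_create_vcpu() over from calling vmcs_clear() directly.
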
@@ -4016,7 +4040,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (!vmx->vmcs)
 		goto free_msrs;
 
-	vmcs_clear(vmx->vmcs);
+	vmcs_init(vmx->vmcs);
 
 	cpu = get_cpu();
 	vmx_vcpu_load(&vmx->vcpu, cpu);