@@ -3118,15 +3118,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 		return ERR_PTR(-ENOMEM);
 
 	allocate_vpid(vmx);
-	if (id == 0 && vm_need_ept()) {
-		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-			VMX_EPT_WRITABLE_MASK |
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
-		kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
-			VMX_EPT_FAKE_DIRTY_MASK, 0ull,
-			VMX_EPT_EXECUTABLE_MASK);
-		kvm_enable_tdp();
-	}
 
 	err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
 	if (err)
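The block removed above performed the one-time EPT mask setup from vcpu
creation, gated on "id == 0", so it only ran when vcpu 0 was created; the
hunk below moves it into vmx_init(), where it runs exactly once at module
load. For reference, a sketch of the VMX_EPT_* bits being combined here,
as defined in arch/x86/kvm/vmx.h of this era (values shown for
illustration; verify against the tree in question):

	/* EPT PTE permission bits and memory-type field (sketch) */
	#define VMX_EPT_READABLE_MASK		0x1ull	/* read allowed */
	#define VMX_EPT_WRITABLE_MASK		0x2ull	/* write allowed */
	#define VMX_EPT_EXECUTABLE_MASK		0x4ull	/* execute allowed */
	#define VMX_EPT_MT_EPTE_SHIFT		3	/* memory-type field offset */
	#define VMX_EPT_DEFAULT_MT		0x6ull	/* write-back (WB) */
	/* Software-available bits used to emulate accessed/dirty tracking,
	 * since this generation of EPT has no hardware A/D bits: */
	#define VMX_EPT_FAKE_ACCESSED_MASK	(1ull << 62)
	#define VMX_EPT_FAKE_DIRTY_MASK		(1ull << 63)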
@@ -3305,8 +3296,17 @@ static int __init vmx_init(void)
 	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
 	vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
 
-	if (vm_need_ept())
+	if (vm_need_ept()) {
 		bypass_guest_pf = 0;
+		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
+			VMX_EPT_WRITABLE_MASK |
+			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+		kvm_mmu_set_mask_ptes(0ull, VMX_EPT_FAKE_ACCESSED_MASK,
+			VMX_EPT_FAKE_DIRTY_MASK, 0ull,
+			VMX_EPT_EXECUTABLE_MASK);
+		kvm_enable_tdp();
+	} else
+		kvm_disable_tdp();
 
 	if (bypass_guest_pf)
 		kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
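With this change vmx_init() also tells the common MMU explicitly when
two-dimensional paging is unavailable, via the new kvm_disable_tdp() call
in the else branch, so the MMU falls back to shadow paging. A minimal
sketch of the mmu.c helpers involved, assuming the global tdp_enabled
flag of this era (illustrative, not copied from the tree):

	/* arch/x86/kvm/mmu.c (sketch): both helpers just toggle the
	 * global flag the MMU consults when building page tables. */
	void kvm_enable_tdp(void)
	{
		tdp_enabled = true;
	}
	EXPORT_SYMBOL_GPL(kvm_enable_tdp);

	void kvm_disable_tdp(void)
	{
		tdp_enabled = false;
	}
	EXPORT_SYMBOL_GPL(kvm_disable_tdp);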