@@ -1046,6 +1046,11 @@ static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
 }
 
+static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
+{
+	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
+}
+
 static inline bool is_exception(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -7367,6 +7372,33 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 	vmcs12->guest_physical_address = fault->address;
 }
 
+/* Callbacks for nested_ept_init_mmu_context: */
+
+static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
+{
+	/* return the page table to be shadowed - in our case, EPT12 */
+	return get_vmcs12(vcpu)->ept_pointer;
+}
+
+static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+{
+	int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
+			nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
+
+	vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
+	vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
+	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
+
+	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
+
+	return r;
+}
+
+static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+}
+
 /*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -7587,6 +7619,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 		vmx_flush_tlb(vcpu);
 	}
 
+	if (nested_cpu_has_ept(vmcs12)) {
+		kvm_mmu_unload(vcpu);
+		nested_ept_init_mmu_context(vcpu);
+	}
+
 	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
 		vcpu->arch.efer = vmcs12->guest_ia32_efer;
 	else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
@@ -8059,7 +8096,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
 	kvm_set_cr4(vcpu, vmcs12->host_cr4);
 
-	/* shadow page tables on either EPT or shadow page tables */
+	if (nested_cpu_has_ept(vmcs12))
+		nested_ept_uninit_mmu_context(vcpu);
+
 	kvm_set_cr3(vcpu, vmcs12->host_cr3);
 	kvm_mmu_reset_context(vcpu);
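
For context: the new nested_cpu_has_ept() leans on the pre-existing nested_cpu_has2() helper, which is not visible in these hunks. A minimal sketch of that helper's shape, reconstructed from the surrounding vmx.c of this era rather than quoted from the patch itself, so treat the exact body as an approximation:

	/*
	 * Approximate shape of the existing helper used by nested_cpu_has_ept();
	 * not part of this patch. A secondary execution control bit in vmcs12
	 * only counts if vmcs12 also activates the secondary controls.
	 */
	static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
	{
		return (vmcs12->cpu_based_vm_exec_control &
				CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
			(vmcs12->secondary_vm_exec_control & bit);
	}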