@@ -712,6 +712,7 @@ static void nested_release_page_clean(struct page *page)
 	kvm_release_page_clean(page);
 }
 
+static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
@@ -2161,6 +2162,7 @@ static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
 static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
 static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
 static u32 nested_vmx_misc_low, nested_vmx_misc_high;
+static u32 nested_vmx_ept_caps;
 static __init void nested_vmx_setup_ctls_msrs(void)
 {
 	/*
@@ -6279,6 +6281,74 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	return 1;
 }
 
+/* Emulate the INVEPT instruction */
+static int handle_invept(struct kvm_vcpu *vcpu)
+{
+	u32 vmx_instruction_info, types;
+	unsigned long type;
+	gva_t gva;
+	struct x86_exception e;
+	struct {
+		u64 eptp, gpa;
+	} operand;
+	u64 eptp_mask = ((1ull << 51) - 1) & PAGE_MASK;
+
+	if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
+	    !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+
+	if (!nested_vmx_check_permission(vcpu))
+		return 1;
+
+	if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+
+	vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+	type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+	types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
+
+	if (!(types & (1UL << type))) {
+		nested_vmx_failValid(vcpu,
+				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+		return 1;
+	}
+
+	/* According to the Intel VMX instruction reference, the memory
+	 * operand is read even if it isn't needed (e.g., for type==global)
+	 */
+	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+			vmx_instruction_info, &gva))
+		return 1;
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+				sizeof(operand), &e)) {
+		kvm_inject_page_fault(vcpu, &e);
+		return 1;
+	}
+
+	switch (type) {
+	case VMX_EPT_EXTENT_CONTEXT:
+		if ((operand.eptp & eptp_mask) !=
+				(nested_ept_get_cr3(vcpu) & eptp_mask))
+			break;
+	case VMX_EPT_EXTENT_GLOBAL:
+		kvm_mmu_sync_roots(vcpu);
+		kvm_mmu_flush_tlb(vcpu);
+		nested_vmx_succeed(vcpu);
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
+
+	skip_emulated_instruction(vcpu);
+	return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume. Otherwise they set the kvm_run parameter to indicate what needs
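The types computation above is terse: IA32_VMX_EPT_VPID_CAP advertises INVEPT itself in bit 20 (VMX_EPT_INVEPT_BIT) and the single-context and all-context invalidation types in bits 25 and 26, so shifting the capability word right by VMX_EPT_EXTENT_SHIFT and masking with 6 leaves a small bitmap indexed directly by the extent value (1 or 2) that the guest passed in its register operand, which is exactly what the !(types & (1UL << type)) check consumes. The stand-alone sketch below is not part of the patch; it replays that decoding in user space, the constants are mirrors of asm/vmx.h, and the sample MSR value is invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Mirrors of the asm/vmx.h constants used by handle_invept() above. */
#define VMX_EPT_INVEPT_BIT	(1ull << 20)	/* INVEPT instruction supported */
#define VMX_EPT_EXTENT_SHIFT	24
#define VMX_EPT_EXTENT_CONTEXT	1		/* single-context invalidation  */
#define VMX_EPT_EXTENT_GLOBAL	2		/* all-context invalidation     */

/* Same expression as in handle_invept(): a bitmap of supported extents,
 * indexed by the extent number the guest passes in its register operand. */
static uint32_t supported_invept_types(uint64_t ept_vpid_cap)
{
	return (uint32_t)(ept_vpid_cap >> VMX_EPT_EXTENT_SHIFT) & 6;
}

int main(void)
{
	/* Invented IA32_VMX_EPT_VPID_CAP value: INVEPT plus both extents. */
	uint64_t caps = VMX_EPT_INVEPT_BIT | (1ull << 25) | (1ull << 26);
	uint32_t types = supported_invept_types(caps);

	printf("single-context INVEPT supported: %d\n",
	       !!(types & (1u << VMX_EPT_EXTENT_CONTEXT)));
	printf("all-context INVEPT supported:    %d\n",
	       !!(types & (1u << VMX_EPT_EXTENT_GLOBAL)));
	return 0;
}

Any extent the (virtual) capability MSR does not advertise is rejected with VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID rather than emulated, matching how real hardware reports an unsupported INVEPT type.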
@@ -6323,6 +6393,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
 	[EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
 	[EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
+	[EXIT_REASON_INVEPT]                  = handle_invept,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -6549,6 +6620,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
 	case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
 	case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
 	case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+	case EXIT_REASON_INVEPT:
 		/*
 		 * VMX instructions trap unconditionally. This allows L1 to
 		 * emulate them for its L2 guest, i.e., allows 3-level nesting!