@@ -84,6 +84,9 @@ module_param(vmm_exclusive, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
+static bool __read_mostly enable_apicv_reg_vid = 1;
+module_param(enable_apicv_reg_vid, bool, S_IRUGO);
+
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use
  * VMX and be a hypervisor for its own guests. If nested=0, guests may not
@@ -640,6 +643,8 @@ static unsigned long *vmx_io_bitmap_a;
 static unsigned long *vmx_io_bitmap_b;
 static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
+static unsigned long *vmx_msr_bitmap_legacy_x2apic;
+static unsigned long *vmx_msr_bitmap_longmode_x2apic;
 
 static bool cpu_has_load_ia32_efer;
 static bool cpu_has_load_perf_global_ctrl;
@@ -764,6 +769,24 @@ static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
 }
 
+static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+}
+
+static inline bool cpu_has_vmx_apic_register_virt(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_APIC_REGISTER_VIRT;
+}
+
+static inline bool cpu_has_vmx_virtual_intr_delivery(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
+}
+
 static inline bool cpu_has_vmx_flexpriority(void)
 {
 	return cpu_has_vmx_tpr_shadow() &&
@@ -1821,6 +1844,25 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 	vmx->guest_msrs[from] = tmp;
 }
 
+static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
+{
+	unsigned long *msr_bitmap;
+
+	if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+		if (is_long_mode(vcpu))
+			msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
+		else
+			msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
+	} else {
+		if (is_long_mode(vcpu))
+			msr_bitmap = vmx_msr_bitmap_longmode;
+		else
+			msr_bitmap = vmx_msr_bitmap_legacy;
+	}
+
+	vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
+}
+
 /*
  * Set up the vmcs to automatically save and restore system
  * msrs. Don't touch the 64-bit msrs if the guest is in legacy
@@ -1829,7 +1871,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
 	int save_nmsrs, index;
-	unsigned long *msr_bitmap;
 
 	save_nmsrs = 0;
 #ifdef CONFIG_X86_64
@@ -1861,14 +1902,8 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 
 	vmx->save_nmsrs = save_nmsrs;
 
-	if (cpu_has_vmx_msr_bitmap()) {
-		if (is_long_mode(&vmx->vcpu))
-			msr_bitmap = vmx_msr_bitmap_longmode;
-		else
-			msr_bitmap = vmx_msr_bitmap_legacy;
-
-		vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
-	}
+	if (cpu_has_vmx_msr_bitmap())
+		vmx_set_msr_bitmap(&vmx->vcpu);
 }
 
 /*
@@ -2534,13 +2569,16 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
 		min2 = 0;
 		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+			SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
 			SECONDARY_EXEC_WBINVD_EXITING |
 			SECONDARY_EXEC_ENABLE_VPID |
 			SECONDARY_EXEC_ENABLE_EPT |
 			SECONDARY_EXEC_UNRESTRICTED_GUEST |
 			SECONDARY_EXEC_PAUSE_LOOP_EXITING |
 			SECONDARY_EXEC_RDTSCP |
-			SECONDARY_EXEC_ENABLE_INVPCID;
+			SECONDARY_EXEC_ENABLE_INVPCID |
+			SECONDARY_EXEC_APIC_REGISTER_VIRT |
+			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
 		if (adjust_vmx_controls(min2, opt2,
 				MSR_IA32_VMX_PROCBASED_CTLS2,
 				&_cpu_based_2nd_exec_control) < 0)
@@ -2551,6 +2589,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 			   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
 		_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
 #endif
+
+	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
+		_cpu_based_2nd_exec_control &= ~(
+				SECONDARY_EXEC_APIC_REGISTER_VIRT |
+				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
+				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+
 	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
 		/* CR3 accesses and invlpg don't need to cause VM Exits when EPT
 		   enabled */
@@ -2748,6 +2793,15 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_ple())
 		ple_gap = 0;
 
+	if (!cpu_has_vmx_apic_register_virt() ||
+				!cpu_has_vmx_virtual_intr_delivery())
+		enable_apicv_reg_vid = 0;
+
+	if (enable_apicv_reg_vid)
+		kvm_x86_ops->update_cr8_intercept = NULL;
+	else
+		kvm_x86_ops->hwapic_irr_update = NULL;
+
 	if (nested)
 		nested_vmx_setup_ctls_msrs();
 
@@ -3173,6 +3227,14 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (!is_paging(vcpu)) {
 		hw_cr4 &= ~X86_CR4_PAE;
 		hw_cr4 |= X86_CR4_PSE;
+		/*
+		 * SMEP is disabled if CPU is in non-paging mode in
+		 * hardware. However KVM always uses paging mode to
+		 * emulate guest non-paging mode with TDP.
+		 * To emulate this behavior, SMEP needs to be manually
+		 * disabled when guest switches to non-paging mode.
+		 */
+		hw_cr4 &= ~X86_CR4_SMEP;
 	} else if (!(cr4 & X86_CR4_PAE)) {
 		hw_cr4 &= ~X86_CR4_PAE;
 	}
@@ -3707,7 +3769,10 @@ static void free_vpid(struct vcpu_vmx *vmx)
 	spin_unlock(&vmx_vpid_lock);
 }
 
-static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
+#define MSR_TYPE_R	1
+#define MSR_TYPE_W	2
+static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+						u32 msr, int type)
 {
 	int f = sizeof(unsigned long);
 
@@ -3720,20 +3785,93 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
 	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
 	 */
 	if (msr <= 0x1fff) {
-		__clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
-		__clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
+		if (type & MSR_TYPE_R)
+			/* read-low */
+			__clear_bit(msr, msr_bitmap + 0x000 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-low */
+			__clear_bit(msr, msr_bitmap + 0x800 / f);
+
 	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
 		msr &= 0x1fff;
-		__clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
-		__clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
+		if (type & MSR_TYPE_R)
+			/* read-high */
+			__clear_bit(msr, msr_bitmap + 0x400 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-high */
+			__clear_bit(msr, msr_bitmap + 0xc00 / f);
+
+	}
+}
+
+static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+						u32 msr, int type)
+{
+	int f = sizeof(unsigned long);
+
+	if (!cpu_has_vmx_msr_bitmap())
+		return;
+
+	/*
+	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+	 * have the write-low and read-high bitmap offsets the wrong way round.
+	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+	 */
+	if (msr <= 0x1fff) {
+		if (type & MSR_TYPE_R)
+			/* read-low */
+			__set_bit(msr, msr_bitmap + 0x000 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-low */
+			__set_bit(msr, msr_bitmap + 0x800 / f);
+
+	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+		msr &= 0x1fff;
+		if (type & MSR_TYPE_R)
+			/* read-high */
+			__set_bit(msr, msr_bitmap + 0x400 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-high */
+			__set_bit(msr, msr_bitmap + 0xc00 / f);
+
 	}
 }
 
 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
 {
 	if (!longmode_only)
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
-	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
+		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
+						msr, MSR_TYPE_R | MSR_TYPE_W);
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
+						msr, MSR_TYPE_R | MSR_TYPE_W);
+}
+
+static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
+{
+	__vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+			msr, MSR_TYPE_R);
+	__vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+			msr, MSR_TYPE_R);
+}
+
+static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
+{
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+			msr, MSR_TYPE_R);
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+			msr, MSR_TYPE_R);
+}
+
+static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
+{
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+			msr, MSR_TYPE_W);
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+			msr, MSR_TYPE_W);
 }
 
 /*
@@ -3812,6 +3950,11 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 	return exec_control;
 }
 
+static int vmx_vm_has_apicv(struct kvm *kvm)
+{
+	return enable_apicv_reg_vid && irqchip_in_kernel(kvm);
+}
+
 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 {
 	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
@@ -3829,6 +3972,10 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
 	if (!ple_gap)
 		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+	if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
+				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 	return exec_control;
 }
 
@@ -3873,6 +4020,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 				vmx_secondary_exec_control(vmx));
 	}
 
+	if (enable_apicv_reg_vid) {
+		vmcs_write64(EOI_EXIT_BITMAP0, 0);
+		vmcs_write64(EOI_EXIT_BITMAP1, 0);
+		vmcs_write64(EOI_EXIT_BITMAP2, 0);
+		vmcs_write64(EOI_EXIT_BITMAP3, 0);
+
+		vmcs_write16(GUEST_INTR_STATUS, 0);
+	}
+
 	if (ple_gap) {
 		vmcs_write32(PLE_GAP, ple_gap);
 		vmcs_write32(PLE_WINDOW, ple_window);
@@ -4787,6 +4943,26 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
 	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
+static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
+{
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	int vector = exit_qualification & 0xff;
+
+	/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
+	kvm_apic_set_eoi_accelerated(vcpu, vector);
+	return 1;
+}
+
+static int handle_apic_write(struct kvm_vcpu *vcpu)
+{
+	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+	u32 offset = exit_qualification & 0xfff;
+
+	/* APIC-write VM exit is trap-like and thus no need to adjust IP */
+	kvm_apic_write_nodecode(vcpu, offset);
+	return 1;
+}
+
 static int handle_task_switch(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5721,6 +5897,8 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
 	[EXIT_REASON_VMON] = handle_vmon,
 	[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
 	[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
+	[EXIT_REASON_APIC_WRITE] = handle_apic_write,
+	[EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
 	[EXIT_REASON_WBINVD] = handle_wbinvd,
 	[EXIT_REASON_XSETBV] = handle_xsetbv,
 	[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
@@ -6070,6 +6248,85 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 	vmcs_write32(TPR_THRESHOLD, irr);
 }
 
+static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
+{
+	u32 sec_exec_control;
+
+	/*
+	 * There is no point in enabling virtualize x2apic without enabling
+	 * apicv.
+	 */
+	if (!cpu_has_vmx_virtualize_x2apic_mode() ||
+				!vmx_vm_has_apicv(vcpu->kvm))
+		return;
+
+	if (!vm_need_tpr_shadow(vcpu->kvm))
+		return;
+
+	sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+
+	if (set) {
+		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+	} else {
+		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+	}
+	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
+
+	vmx_set_msr_bitmap(vcpu);
+}
+
+static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
+{
+	u16 status;
+	u8 old;
+
+	if (!vmx_vm_has_apicv(kvm))
+		return;
+
+	if (isr == -1)
+		isr = 0;
+
+	status = vmcs_read16(GUEST_INTR_STATUS);
+	old = status >> 8;
+	if (isr != old) {
+		status &= 0xff;
+		status |= isr << 8;
+		vmcs_write16(GUEST_INTR_STATUS, status);
+	}
+}
+
+static void vmx_set_rvi(int vector)
+{
+	u16 status;
+	u8 old;
+
+	status = vmcs_read16(GUEST_INTR_STATUS);
+	old = (u8)status & 0xff;
+	if ((u8)vector != old) {
+		status &= ~0xff;
+		status |= (u8)vector;
+		vmcs_write16(GUEST_INTR_STATUS, status);
+	}
+}
+
+static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
+{
+	if (max_irr == -1)
+		return;
+
+	vmx_set_rvi(max_irr);
+}
+
+static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
+	vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
+	vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
+	vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
+}
+
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
@@ -7333,6 +7590,11 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.enable_nmi_window = enable_nmi_window,
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
+	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
+	.vm_has_apicv = vmx_vm_has_apicv,
+	.load_eoi_exitmap = vmx_load_eoi_exitmap,
+	.hwapic_irr_update = vmx_hwapic_irr_update,
+	.hwapic_isr_update = vmx_hwapic_isr_update,
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
@@ -7365,7 +7627,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 static int __init vmx_init(void)
 {
-	int r, i;
+	int r, i, msr;
 
 	rdmsrl_safe(MSR_EFER, &host_efer);
 
@@ -7386,11 +7648,19 @@ static int __init vmx_init(void)
 	if (!vmx_msr_bitmap_legacy)
 		goto out1;
 
+	vmx_msr_bitmap_legacy_x2apic =
+				(unsigned long *)__get_free_page(GFP_KERNEL);
+	if (!vmx_msr_bitmap_legacy_x2apic)
+		goto out2;
+
 	vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_msr_bitmap_longmode)
-		goto out2;
+		goto out3;
+
+	vmx_msr_bitmap_longmode_x2apic =
+				(unsigned long *)__get_free_page(GFP_KERNEL);
+	if (!vmx_msr_bitmap_longmode_x2apic)
+		goto out4;
 
 	/*
 	 * Allow direct access to the PC debug port (it is often used for I/O
@@ -7422,6 +7692,28 @@ static int __init vmx_init(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+	memcpy(vmx_msr_bitmap_legacy_x2apic,
+			vmx_msr_bitmap_legacy, PAGE_SIZE);
+	memcpy(vmx_msr_bitmap_longmode_x2apic,
+			vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+	if (enable_apicv_reg_vid) {
+		for (msr = 0x800; msr <= 0x8ff; msr++)
+			vmx_disable_intercept_msr_read_x2apic(msr);
+
+		/* According to the SDM, in x2apic mode the whole id reg is
+		 * used. But in KVM, it only uses the highest eight bits. Need
+		 * to intercept it. */
+		vmx_enable_intercept_msr_read_x2apic(0x802);
+		/* TMCCT */
+		vmx_enable_intercept_msr_read_x2apic(0x839);
+		/* TPR */
+		vmx_disable_intercept_msr_write_x2apic(0x808);
+		/* EOI */
+		vmx_disable_intercept_msr_write_x2apic(0x80b);
+		/* SELF-IPI */
+		vmx_disable_intercept_msr_write_x2apic(0x83f);
+	}
 
 	if (enable_ept) {
 		kvm_mmu_set_mask_ptes(0ull,
@@ -7435,8 +7727,10 @@ static int __init vmx_init(void)
 
 	return 0;
 
-out3:
+out4:
 	free_page((unsigned long)vmx_msr_bitmap_longmode);
+out3:
+	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
 out2:
 	free_page((unsigned long)vmx_msr_bitmap_legacy);
 out1:
@@ -7448,6 +7742,8 @@ out:
 
 static void __exit vmx_exit(void)
 {
+	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
 	free_page((unsigned long)vmx_msr_bitmap_legacy);
 	free_page((unsigned long)vmx_msr_bitmap_longmode);
 	free_page((unsigned long)vmx_io_bitmap_b);