@@ -63,6 +63,10 @@ MODULE_LICENSE("GPL");
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

+#define TSC_RATIO_RSVD   0xffffff0000000000ULL
+#define TSC_RATIO_MIN    0x0000000000000001ULL
+#define TSC_RATIO_MAX    0x000000ffffffffffULL
+
static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
@@ -93,14 +97,6 @@ struct nested_state {
    /* A VMEXIT is required but not yet emulated */
    bool exit_required;

-   /*
-    * If we vmexit during an instruction emulation we need this to restore
-    * the l1 guest rip after the emulation
-    */
-   unsigned long vmexit_rip;
-   unsigned long vmexit_rsp;
-   unsigned long vmexit_rax;
-
    /* cache for intercepts of the guest */
    u32 intercept_cr;
    u32 intercept_dr;
@@ -144,8 +140,13 @@ struct vcpu_svm {
    unsigned int3_injected;
    unsigned long int3_rip;
    u32 apf_reason;
+
+   u64 tsc_ratio;
};

+static DEFINE_PER_CPU(u64, current_tsc_ratio);
+#define TSC_RATIO_DEFAULT   0x0100000000ULL
+
#define MSR_INVALID 0xffffffffU

static struct svm_direct_access_msrs {
@@ -190,6 +191,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);
+static u64 __scale_tsc(u64 ratio, u64 tsc);

enum {
    VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
@@ -376,7 +378,6 @@ struct svm_cpu_data {
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
-static uint32_t svm_features;

struct svm_init_data {
    int cpu;
@@ -569,6 +570,10 @@ static int has_svm(void)

static void svm_hardware_disable(void *garbage)
{
+   /* Make sure we clean up behind us */
+   if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
+       wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
+
    cpu_svm_disable();
}

@@ -610,6 +615,11 @@ static int svm_hardware_enable(void *garbage)

    wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

+   if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
+       wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
+       __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
+   }
+
    svm_init_erratum_383();

    return 0;
@@ -791,6 +801,23 @@ static __init int svm_hardware_setup(void)
    if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
        kvm_enable_efer_bits(EFER_FFXSR);

+   if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
+       u64 max;
+
+       kvm_has_tsc_control = true;
+
+       /*
+        * Make sure the user can only configure tsc_khz values that
+        * fit into a signed integer.
+        * A min value is not calculated because it will always
+        * be 1 on all machines, and a value of 0 is used to disable
+        * tsc-scaling for the vcpu.
+        */
+       max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
+
+       kvm_max_guest_tsc_khz = max;
+   }
+
    if (nested) {
        printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
        kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
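As a rough check of the bound above (using an example host frequency, not a value from the patch): TSC_RATIO_MAX is an 8.32 fixed-point number just under 256, so __scale_tsc(tsc_khz, TSC_RATIO_MAX) works out to roughly 256 * tsc_khz. On a 3,000,000 kHz host that is about 767,999,999 kHz, well below the 0x7fffffff clamp, so kvm_max_guest_tsc_khz ends up at roughly 256 times the host frequency; the min() only bites on very fast hosts, keeping the value inside a signed 32-bit integer.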
@@ -802,8 +829,6 @@ static __init int svm_hardware_setup(void)
        goto err;
    }

-   svm_features = cpuid_edx(SVM_CPUID_FUNC);
-
    if (!boot_cpu_has(X86_FEATURE_NPT))
        npt_enabled = false;

@@ -854,6 +879,64 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
    seg->base = 0;
}

+static u64 __scale_tsc(u64 ratio, u64 tsc)
+{
+   u64 mult, frac, _tsc;
+
+   mult = ratio >> 32;
+   frac = ratio & ((1ULL << 32) - 1);
+
+   _tsc  = tsc;
+   _tsc *= mult;
+   _tsc += (tsc >> 32) * frac;
+   _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
+
+   return _tsc;
+}
+
+static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   u64 _tsc = tsc;
+
+   if (svm->tsc_ratio != TSC_RATIO_DEFAULT)
+       _tsc = __scale_tsc(svm->tsc_ratio, tsc);
+
+   return _tsc;
+}
+
+static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   u64 ratio;
+   u64 khz;
+
+   /* TSC scaling supported? */
+   if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR))
+       return;
+
+   /* TSC-Scaling disabled or guest TSC same frequency as host TSC? */
+   if (user_tsc_khz == 0) {
+       vcpu->arch.virtual_tsc_khz = 0;
+       svm->tsc_ratio = TSC_RATIO_DEFAULT;
+       return;
+   }
+
+   khz = user_tsc_khz;
+
+   /* TSC scaling required - calculate ratio */
+   ratio = khz << 32;
+   do_div(ratio, tsc_khz);
+
+   if (ratio == 0 || ratio & TSC_RATIO_RSVD) {
+       WARN_ONCE(1, "Invalid TSC ratio - virtual-tsc-khz=%u\n",
+                 user_tsc_khz);
+       return;
+   }
+   vcpu->arch.virtual_tsc_khz = user_tsc_khz;
+   svm->tsc_ratio = ratio;
+}
+
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
    struct vcpu_svm *svm = to_svm(vcpu);
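The ratio programmed by svm_set_tsc_khz() and consumed by __scale_tsc() is an 8.32 fixed-point multiplier: bits 39:32 hold the integer part, bits 31:0 the fraction, and TSC_RATIO_DEFAULT (0x0100000000) encodes 1.0. Below is a minimal user-space sketch of the same arithmetic, assuming an example 3,000,000 kHz host and 1,500,000 kHz guest; the helper and variable names are illustrative and not part of the patch.

/* Illustrative user-space sketch only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

/* Same 8.32 fixed-point multiply as __scale_tsc() above. */
static uint64_t scale_tsc(uint64_t ratio, uint64_t tsc)
{
    uint64_t mult = ratio >> 32;
    uint64_t frac = ratio & 0xffffffffULL;

    return tsc * mult
         + (tsc >> 32) * frac
         + (((tsc & 0xffffffffULL) * frac) >> 32);
}

int main(void)
{
    uint64_t host_khz  = 3000000;               /* example: 3.0 GHz host  */
    uint64_t guest_khz = 1500000;               /* example: 1.5 GHz guest */
    uint64_t ratio = (guest_khz << 32) / host_khz;      /* 0x0080000000 */

    /* One second of host cycles scales to one second of guest cycles. */
    printf("ratio=%#llx scaled=%llu\n",
           (unsigned long long)ratio,
           (unsigned long long)scale_tsc(ratio, host_khz * 1000));
    return 0;
}

With these example numbers the ratio is 0x0080000000 (0.5) and one second of host cycles (3,000,000,000) scales to 1,500,000,000 guest cycles, matching the guest frequency.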
@@ -880,6 +963,15 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
    mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

+static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+   u64 tsc;
+
+   tsc = svm_scale_tsc(vcpu, native_read_tsc());
+
+   return target_tsc - tsc;
+}
+
static void init_vmcb(struct vcpu_svm *svm)
{
    struct vmcb_control_area *control = &svm->vmcb->control;
@@ -975,7 +1067,7 @@ static void init_vmcb(struct vcpu_svm *svm)
    svm_set_efer(&svm->vcpu, 0);
    save->dr6 = 0xffff0ff0;
    save->dr7 = 0x400;
-   save->rflags = 2;
+   kvm_set_rflags(&svm->vcpu, 2);
    save->rip = 0x0000fff0;
    svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

@@ -1048,6 +1140,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        goto out;
    }

+   svm->tsc_ratio = TSC_RATIO_DEFAULT;
+
    err = kvm_vcpu_init(&svm->vcpu, kvm, id);
    if (err)
        goto free_svm;
@@ -1141,6 +1235,12 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

    for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
        rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
+
+   if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
+       svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
+       __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
+       wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
+   }
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -1365,31 +1465,6 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
    struct vcpu_svm *svm = to_svm(vcpu);

-   if (is_guest_mode(vcpu)) {
-       /*
-        * We are here because we run in nested mode, the host kvm
-        * intercepts cr0 writes but the l1 hypervisor does not.
-        * But the L1 hypervisor may intercept selective cr0 writes.
-        * This needs to be checked here.
-        */
-       unsigned long old, new;
-
-       /* Remove bits that would trigger a real cr0 write intercept */
-       old = vcpu->arch.cr0 & SVM_CR0_SELECTIVE_MASK;
-       new = cr0 & SVM_CR0_SELECTIVE_MASK;
-
-       if (old == new) {
-           /* cr0 write with ts and mp unchanged */
-           svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
-           if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
-               svm->nested.vmexit_rip = kvm_rip_read(vcpu);
-               svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
-               svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
-               return;
-           }
-       }
-   }
-
#ifdef CONFIG_X86_64
    if (vcpu->arch.efer & EFER_LME) {
        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
@@ -2127,7 +2202,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
    nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
    nested_vmcb->save.cr2 = vmcb->save.cr2;
    nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
-   nested_vmcb->save.rflags = vmcb->save.rflags;
+   nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
    nested_vmcb->save.rip = vmcb->save.rip;
    nested_vmcb->save.rsp = vmcb->save.rsp;
    nested_vmcb->save.rax = vmcb->save.rax;
@@ -2184,7 +2259,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
    svm->vmcb->save.ds = hsave->save.ds;
    svm->vmcb->save.gdtr = hsave->save.gdtr;
    svm->vmcb->save.idtr = hsave->save.idtr;
-   svm->vmcb->save.rflags = hsave->save.rflags;
+   kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
    svm_set_efer(&svm->vcpu, hsave->save.efer);
    svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
    svm_set_cr4(&svm->vcpu, hsave->save.cr4);
@@ -2312,7 +2387,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
    hsave->save.efer = svm->vcpu.arch.efer;
    hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
    hsave->save.cr4 = svm->vcpu.arch.cr4;
-   hsave->save.rflags = vmcb->save.rflags;
+   hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
    hsave->save.rip = kvm_rip_read(&svm->vcpu);
    hsave->save.rsp = vmcb->save.rsp;
    hsave->save.rax = vmcb->save.rax;
@@ -2323,7 +2398,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)

    copy_vmcb_control_area(hsave, vmcb);

-   if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
+   if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
        svm->vcpu.arch.hflags |= HF_HIF_MASK;
    else
        svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
@@ -2341,7 +2416,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
    svm->vmcb->save.ds = nested_vmcb->save.ds;
    svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
    svm->vmcb->save.idtr = nested_vmcb->save.idtr;
-   svm->vmcb->save.rflags = nested_vmcb->save.rflags;
+   kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
    svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
    svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
    svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
@@ -2443,13 +2518,13 @@ static int vmload_interception(struct vcpu_svm *svm)
    if (nested_svm_check_permissions(svm))
        return 1;

-   svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-   skip_emulated_instruction(&svm->vcpu);
-
    nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
    if (!nested_vmcb)
        return 1;

+   svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+   skip_emulated_instruction(&svm->vcpu);
+
    nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
    nested_svm_unmap(page);

@@ -2464,13 +2539,13 @@ static int vmsave_interception(struct vcpu_svm *svm)
    if (nested_svm_check_permissions(svm))
        return 1;

-   svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
-   skip_emulated_instruction(&svm->vcpu);
-
    nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
    if (!nested_vmcb)
        return 1;

+   svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+   skip_emulated_instruction(&svm->vcpu);
+
    nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
    nested_svm_unmap(page);

@@ -2676,6 +2751,29 @@ static int emulate_on_interception(struct vcpu_svm *svm)
    return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}

+bool check_selective_cr0_intercepted(struct vcpu_svm *svm, unsigned long val)
+{
+   unsigned long cr0 = svm->vcpu.arch.cr0;
+   bool ret = false;
+   u64 intercept;
+
+   intercept = svm->nested.intercept;
+
+   if (!is_guest_mode(&svm->vcpu) ||
+       (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
+       return false;
+
+   cr0 &= ~SVM_CR0_SELECTIVE_MASK;
+   val &= ~SVM_CR0_SELECTIVE_MASK;
+
+   if (cr0 ^ val) {
+       svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+       ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
+   }
+
+   return ret;
+}
+
#define CR_VALID (1ULL << 63)

static int cr_interception(struct vcpu_svm *svm)
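check_selective_cr0_intercepted() reflects a CR0 write to the L1 hypervisor as SVM_EXIT_CR0_SEL_WRITE only when a bit outside SVM_CR0_SELECTIVE_MASK (that is, a bit other than TS or MP) changes. A standalone sketch of just that masking decision follows; the function name is mine and the CR0 bit positions are the usual architectural values, not definitions taken from the patch.

/* Standalone sketch of the masking decision above -- not part of the patch. */
#include <stdbool.h>

#define X86_CR0_MP              (1UL << 1)
#define X86_CR0_TS              (1UL << 3)
#define SVM_CR0_SELECTIVE_MASK  (X86_CR0_TS | X86_CR0_MP)

/* True when a CR0 write changes a bit other than TS or MP and therefore
 * has to be reported to L1 as SVM_EXIT_CR0_SEL_WRITE. */
static bool selective_cr0_write_intercepted(unsigned long old_cr0,
                                            unsigned long new_cr0)
{
    old_cr0 &= ~SVM_CR0_SELECTIVE_MASK;
    new_cr0 &= ~SVM_CR0_SELECTIVE_MASK;

    return (old_cr0 ^ new_cr0) != 0;
}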
@@ -2699,7 +2797,11 @@ static int cr_interception(struct vcpu_svm *svm)
    val = kvm_register_read(&svm->vcpu, reg);
    switch (cr) {
    case 0:
-       err = kvm_set_cr0(&svm->vcpu, val);
+       if (!check_selective_cr0_intercepted(svm, val))
+           err = kvm_set_cr0(&svm->vcpu, val);
+       else
+           return 1;
+
        break;
    case 3:
        err = kvm_set_cr3(&svm->vcpu, val);
@@ -2744,23 +2846,6 @@ static int cr_interception(struct vcpu_svm *svm)
    return 1;
}

-static int cr0_write_interception(struct vcpu_svm *svm)
-{
-   struct kvm_vcpu *vcpu = &svm->vcpu;
-   int r;
-
-   r = cr_interception(svm);
-
-   if (svm->nested.vmexit_rip) {
-       kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
-       kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
-       kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
-       svm->nested.vmexit_rip = 0;
-   }
-
-   return r;
-}
-
static int dr_interception(struct vcpu_svm *svm)
{
    int reg, dr;
@@ -2813,7 +2898,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
    case MSR_IA32_TSC: {
        struct vmcb *vmcb = get_host_vmcb(svm);

-       *data = vmcb->control.tsc_offset + native_read_tsc();
+       *data = vmcb->control.tsc_offset +
+           svm_scale_tsc(vcpu, native_read_tsc());
+
        break;
    }
    case MSR_STAR:
@@ -3048,7 +3135,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
    [SVM_EXIT_READ_CR4] = cr_interception,
    [SVM_EXIT_READ_CR8] = cr_interception,
    [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
-   [SVM_EXIT_WRITE_CR0] = cr0_write_interception,
+   [SVM_EXIT_WRITE_CR0] = cr_interception,
    [SVM_EXIT_WRITE_CR3] = cr_interception,
    [SVM_EXIT_WRITE_CR4] = cr_interception,
    [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
@@ -3104,97 +3191,109 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
    [SVM_EXIT_NPF] = pf_interception,
};

-void dump_vmcb(struct kvm_vcpu *vcpu)
+static void dump_vmcb(struct kvm_vcpu *vcpu)
{
    struct vcpu_svm *svm = to_svm(vcpu);
    struct vmcb_control_area *control = &svm->vmcb->control;
    struct vmcb_save_area *save = &svm->vmcb->save;

    pr_err("VMCB Control Area:\n");
-   pr_err("cr_read: %04x\n", control->intercept_cr & 0xffff);
-   pr_err("cr_write: %04x\n", control->intercept_cr >> 16);
-   pr_err("dr_read: %04x\n", control->intercept_dr & 0xffff);
-   pr_err("dr_write: %04x\n", control->intercept_dr >> 16);
-   pr_err("exceptions: %08x\n", control->intercept_exceptions);
-   pr_err("intercepts: %016llx\n", control->intercept);
-   pr_err("pause filter count: %d\n", control->pause_filter_count);
-   pr_err("iopm_base_pa: %016llx\n", control->iopm_base_pa);
-   pr_err("msrpm_base_pa: %016llx\n", control->msrpm_base_pa);
-   pr_err("tsc_offset: %016llx\n", control->tsc_offset);
-   pr_err("asid: %d\n", control->asid);
-   pr_err("tlb_ctl: %d\n", control->tlb_ctl);
-   pr_err("int_ctl: %08x\n", control->int_ctl);
-   pr_err("int_vector: %08x\n", control->int_vector);
-   pr_err("int_state: %08x\n", control->int_state);
-   pr_err("exit_code: %08x\n", control->exit_code);
-   pr_err("exit_info1: %016llx\n", control->exit_info_1);
-   pr_err("exit_info2: %016llx\n", control->exit_info_2);
-   pr_err("exit_int_info: %08x\n", control->exit_int_info);
-   pr_err("exit_int_info_err: %08x\n", control->exit_int_info_err);
-   pr_err("nested_ctl: %lld\n", control->nested_ctl);
-   pr_err("nested_cr3: %016llx\n", control->nested_cr3);
-   pr_err("event_inj: %08x\n", control->event_inj);
-   pr_err("event_inj_err: %08x\n", control->event_inj_err);
-   pr_err("lbr_ctl: %lld\n", control->lbr_ctl);
-   pr_err("next_rip: %016llx\n", control->next_rip);
+   pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
+   pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
+   pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
+   pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
+   pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
+   pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
+   pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
+   pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
+   pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
+   pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
+   pr_err("%-20s%d\n", "asid:", control->asid);
+   pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
+   pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
+   pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
+   pr_err("%-20s%08x\n", "int_state:", control->int_state);
+   pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
+   pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
+   pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
+   pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
+   pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
+   pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
+   pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
+   pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
+   pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
+   pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
+   pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
    pr_err("VMCB State Save Area:\n");
-   pr_err("es: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->es.selector, save->es.attrib,
-       save->es.limit, save->es.base);
-   pr_err("cs: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->cs.selector, save->cs.attrib,
-       save->cs.limit, save->cs.base);
-   pr_err("ss: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->ss.selector, save->ss.attrib,
-       save->ss.limit, save->ss.base);
-   pr_err("ds: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->ds.selector, save->ds.attrib,
-       save->ds.limit, save->ds.base);
-   pr_err("fs: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->fs.selector, save->fs.attrib,
-       save->fs.limit, save->fs.base);
-   pr_err("gs: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->gs.selector, save->gs.attrib,
-       save->gs.limit, save->gs.base);
-   pr_err("gdtr: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->gdtr.selector, save->gdtr.attrib,
-       save->gdtr.limit, save->gdtr.base);
-   pr_err("ldtr: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->ldtr.selector, save->ldtr.attrib,
-       save->ldtr.limit, save->ldtr.base);
-   pr_err("idtr: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->idtr.selector, save->idtr.attrib,
-       save->idtr.limit, save->idtr.base);
-   pr_err("tr: s: %04x a: %04x l: %08x b: %016llx\n",
-       save->tr.selector, save->tr.attrib,
-       save->tr.limit, save->tr.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "es:",
+       save->es.selector, save->es.attrib,
+       save->es.limit, save->es.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "cs:",
+       save->cs.selector, save->cs.attrib,
+       save->cs.limit, save->cs.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "ss:",
+       save->ss.selector, save->ss.attrib,
+       save->ss.limit, save->ss.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "ds:",
+       save->ds.selector, save->ds.attrib,
+       save->ds.limit, save->ds.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "fs:",
+       save->fs.selector, save->fs.attrib,
+       save->fs.limit, save->fs.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "gs:",
+       save->gs.selector, save->gs.attrib,
+       save->gs.limit, save->gs.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "gdtr:",
+       save->gdtr.selector, save->gdtr.attrib,
+       save->gdtr.limit, save->gdtr.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "ldtr:",
+       save->ldtr.selector, save->ldtr.attrib,
+       save->ldtr.limit, save->ldtr.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "idtr:",
+       save->idtr.selector, save->idtr.attrib,
+       save->idtr.limit, save->idtr.base);
+   pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
+       "tr:",
+       save->tr.selector, save->tr.attrib,
+       save->tr.limit, save->tr.base);
    pr_err("cpl: %d efer: %016llx\n",
        save->cpl, save->efer);
-   pr_err("cr0: %016llx cr2: %016llx\n",
-       save->cr0, save->cr2);
-   pr_err("cr3: %016llx cr4: %016llx\n",
-       save->cr3, save->cr4);
-   pr_err("dr6: %016llx dr7: %016llx\n",
-       save->dr6, save->dr7);
-   pr_err("rip: %016llx rflags: %016llx\n",
-       save->rip, save->rflags);
-   pr_err("rsp: %016llx rax: %016llx\n",
-       save->rsp, save->rax);
-   pr_err("star: %016llx lstar: %016llx\n",
-       save->star, save->lstar);
-   pr_err("cstar: %016llx sfmask: %016llx\n",
-       save->cstar, save->sfmask);
-   pr_err("kernel_gs_base: %016llx sysenter_cs: %016llx\n",
-       save->kernel_gs_base, save->sysenter_cs);
-   pr_err("sysenter_esp: %016llx sysenter_eip: %016llx\n",
-       save->sysenter_esp, save->sysenter_eip);
-   pr_err("gpat: %016llx dbgctl: %016llx\n",
-       save->g_pat, save->dbgctl);
-   pr_err("br_from: %016llx br_to: %016llx\n",
-       save->br_from, save->br_to);
-   pr_err("excp_from: %016llx excp_to: %016llx\n",
-       save->last_excp_from, save->last_excp_to);
-
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "cr0:", save->cr0, "cr2:", save->cr2);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "cr3:", save->cr3, "cr4:", save->cr4);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "dr6:", save->dr6, "dr7:", save->dr7);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "rip:", save->rip, "rflags:", save->rflags);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "rsp:", save->rsp, "rax:", save->rax);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "star:", save->star, "lstar:", save->lstar);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "cstar:", save->cstar, "sfmask:", save->sfmask);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "kernel_gs_base:", save->kernel_gs_base,
+       "sysenter_cs:", save->sysenter_cs);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "sysenter_esp:", save->sysenter_esp,
+       "sysenter_eip:", save->sysenter_eip);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "br_from:", save->br_from, "br_to:", save->br_to);
+   pr_err("%-15s %016llx %-13s %016llx\n",
+       "excp_from:", save->last_excp_from,
+       "excp_to:", save->last_excp_to);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
@@ -3384,7 +3483,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
        (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
        return 0;

-   ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
+   ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);

    if (is_guest_mode(vcpu))
        return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
@@ -3871,6 +3970,186 @@ static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
    update_cr0_intercept(svm);
}

+#define PRE_EX(exit)  { .exit_code = (exit), \
+           .stage = X86_ICPT_PRE_EXCEPT, }
+#define POST_EX(exit) { .exit_code = (exit), \
+           .stage = X86_ICPT_POST_EXCEPT, }
+#define POST_MEM(exit) { .exit_code = (exit), \
+           .stage = X86_ICPT_POST_MEMACCESS, }
+
+static struct __x86_intercept {
+   u32 exit_code;
+   enum x86_intercept_stage stage;
+} x86_intercept_map[] = {
+   [x86_intercept_cr_read]   = POST_EX(SVM_EXIT_READ_CR0),
+   [x86_intercept_cr_write]  = POST_EX(SVM_EXIT_WRITE_CR0),
+   [x86_intercept_clts]      = POST_EX(SVM_EXIT_WRITE_CR0),
+   [x86_intercept_lmsw]      = POST_EX(SVM_EXIT_WRITE_CR0),
+   [x86_intercept_smsw]      = POST_EX(SVM_EXIT_READ_CR0),
+   [x86_intercept_dr_read]   = POST_EX(SVM_EXIT_READ_DR0),
+   [x86_intercept_dr_write]  = POST_EX(SVM_EXIT_WRITE_DR0),
+   [x86_intercept_sldt]      = POST_EX(SVM_EXIT_LDTR_READ),
+   [x86_intercept_str]       = POST_EX(SVM_EXIT_TR_READ),
+   [x86_intercept_lldt]      = POST_EX(SVM_EXIT_LDTR_WRITE),
+   [x86_intercept_ltr]       = POST_EX(SVM_EXIT_TR_WRITE),
+   [x86_intercept_sgdt]      = POST_EX(SVM_EXIT_GDTR_READ),
+   [x86_intercept_sidt]      = POST_EX(SVM_EXIT_IDTR_READ),
+   [x86_intercept_lgdt]      = POST_EX(SVM_EXIT_GDTR_WRITE),
+   [x86_intercept_lidt]      = POST_EX(SVM_EXIT_IDTR_WRITE),
+   [x86_intercept_vmrun]     = POST_EX(SVM_EXIT_VMRUN),
+   [x86_intercept_vmmcall]   = POST_EX(SVM_EXIT_VMMCALL),
+   [x86_intercept_vmload]    = POST_EX(SVM_EXIT_VMLOAD),
+   [x86_intercept_vmsave]    = POST_EX(SVM_EXIT_VMSAVE),
+   [x86_intercept_stgi]      = POST_EX(SVM_EXIT_STGI),
+   [x86_intercept_clgi]      = POST_EX(SVM_EXIT_CLGI),
+   [x86_intercept_skinit]    = POST_EX(SVM_EXIT_SKINIT),
+   [x86_intercept_invlpga]   = POST_EX(SVM_EXIT_INVLPGA),
+   [x86_intercept_rdtscp]    = POST_EX(SVM_EXIT_RDTSCP),
+   [x86_intercept_monitor]   = POST_MEM(SVM_EXIT_MONITOR),
+   [x86_intercept_mwait]     = POST_EX(SVM_EXIT_MWAIT),
+   [x86_intercept_invlpg]    = POST_EX(SVM_EXIT_INVLPG),
+   [x86_intercept_invd]      = POST_EX(SVM_EXIT_INVD),
+   [x86_intercept_wbinvd]    = POST_EX(SVM_EXIT_WBINVD),
+   [x86_intercept_wrmsr]     = POST_EX(SVM_EXIT_MSR),
+   [x86_intercept_rdtsc]     = POST_EX(SVM_EXIT_RDTSC),
+   [x86_intercept_rdmsr]     = POST_EX(SVM_EXIT_MSR),
+   [x86_intercept_rdpmc]     = POST_EX(SVM_EXIT_RDPMC),
+   [x86_intercept_cpuid]     = PRE_EX(SVM_EXIT_CPUID),
+   [x86_intercept_rsm]       = PRE_EX(SVM_EXIT_RSM),
+   [x86_intercept_pause]     = PRE_EX(SVM_EXIT_PAUSE),
+   [x86_intercept_pushf]     = PRE_EX(SVM_EXIT_PUSHF),
+   [x86_intercept_popf]      = PRE_EX(SVM_EXIT_POPF),
+   [x86_intercept_intn]      = PRE_EX(SVM_EXIT_SWINT),
+   [x86_intercept_iret]      = PRE_EX(SVM_EXIT_IRET),
+   [x86_intercept_icebp]     = PRE_EX(SVM_EXIT_ICEBP),
+   [x86_intercept_hlt]       = POST_EX(SVM_EXIT_HLT),
+   [x86_intercept_in]        = POST_EX(SVM_EXIT_IOIO),
+   [x86_intercept_ins]       = POST_EX(SVM_EXIT_IOIO),
+   [x86_intercept_out]       = POST_EX(SVM_EXIT_IOIO),
+   [x86_intercept_outs]      = POST_EX(SVM_EXIT_IOIO),
+};
+
+#undef PRE_EX
+#undef POST_EX
+#undef POST_MEM
+
+static int svm_check_intercept(struct kvm_vcpu *vcpu,
+                  struct x86_instruction_info *info,
+                  enum x86_intercept_stage stage)
+{
+   struct vcpu_svm *svm = to_svm(vcpu);
+   int vmexit, ret = X86EMUL_CONTINUE;
+   struct __x86_intercept icpt_info;
+   struct vmcb *vmcb = svm->vmcb;
+
+   if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
+       goto out;
+
+   icpt_info = x86_intercept_map[info->intercept];
+
+   if (stage != icpt_info.stage)
+       goto out;
+
+   switch (icpt_info.exit_code) {
+   case SVM_EXIT_READ_CR0:
+       if (info->intercept == x86_intercept_cr_read)
+           icpt_info.exit_code += info->modrm_reg;
+       break;
+   case SVM_EXIT_WRITE_CR0: {
+       unsigned long cr0, val;
+       u64 intercept;
+
+       if (info->intercept == x86_intercept_cr_write)
+           icpt_info.exit_code += info->modrm_reg;
+
+       if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
+           break;
+
+       intercept = svm->nested.intercept;
+
+       if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
+           break;
+
+       cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
+       val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
+
+       if (info->intercept == x86_intercept_lmsw) {
+           cr0 &= 0xfUL;
+           val &= 0xfUL;
+           /* lmsw can't clear PE - catch this here */
+           if (cr0 & X86_CR0_PE)
+               val |= X86_CR0_PE;
+       }
+
+       if (cr0 ^ val)
+           icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
+
+       break;
+   }
+   case SVM_EXIT_READ_DR0:
+   case SVM_EXIT_WRITE_DR0:
+       icpt_info.exit_code += info->modrm_reg;
+       break;
+   case SVM_EXIT_MSR:
+       if (info->intercept == x86_intercept_wrmsr)
+           vmcb->control.exit_info_1 = 1;
+       else
+           vmcb->control.exit_info_1 = 0;
+       break;
+   case SVM_EXIT_PAUSE:
+       /*
+        * We get this for NOP only, but pause
+        * is rep nop; check for that here
+        */
+       if (info->rep_prefix != REPE_PREFIX)
+           goto out;
+   case SVM_EXIT_IOIO: {
+       u64 exit_info;
+       u32 bytes;
+
+       exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
+
+       if (info->intercept == x86_intercept_in ||
+           info->intercept == x86_intercept_ins) {
+           exit_info |= SVM_IOIO_TYPE_MASK;
+           bytes = info->src_bytes;
+       } else {
+           bytes = info->dst_bytes;
+       }
+
+       if (info->intercept == x86_intercept_outs ||
+           info->intercept == x86_intercept_ins)
+           exit_info |= SVM_IOIO_STR_MASK;
+
+       if (info->rep_prefix)
+           exit_info |= SVM_IOIO_REP_MASK;
+
+       bytes = min(bytes, 4u);
+
+       exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
+
+       exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
+
+       vmcb->control.exit_info_1 = exit_info;
+       vmcb->control.exit_info_2 = info->next_rip;
+
+       break;
+   }
+   default:
+       break;
+   }
+
+   vmcb->control.next_rip  = info->next_rip;
+   vmcb->control.exit_code = icpt_info.exit_code;
+   vmexit = nested_svm_exit_handled(svm);
+
+   ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
+                      : X86EMUL_CONTINUE;
+
+out:
+   return ret;
+}
+
static struct kvm_x86_ops svm_x86_ops = {
    .cpu_has_kvm_support = has_svm,
    .disabled_by_bios = is_disabled,
@@ -3952,10 +4231,14 @@ static struct kvm_x86_ops svm_x86_ops = {

    .has_wbinvd_exit = svm_has_wbinvd_exit,

+   .set_tsc_khz = svm_set_tsc_khz,
    .write_tsc_offset = svm_write_tsc_offset,
    .adjust_tsc_offset = svm_adjust_tsc_offset,
+   .compute_tsc_offset = svm_compute_tsc_offset,

    .set_tdp_cr3 = set_tdp_cr3,
+
+   .check_intercept = svm_check_intercept,
};

static int __init svm_init(void)