KVM: SVM: Add xsetbv intercept

This patch implements the xsetbv intercept in the AMD part
of KVM. It makes AVX usable in a safe way for guests on
AVX-capable AMD hardware.
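
For background, xsetbv is the ring-0 instruction a guest kernel uses to
program XCR0, the register that turns on AVX (YMM) state handling. The
sketch below shows that guest-side step; it is illustrative only, not
taken from any kernel source, and the xsetbv()/enable_avx_state()
helpers and XCR0_* macros are named here just for the example.

	/* Illustrative guest-side sketch: enabling AVX state via XCR0.
	 * xsetbv takes the XCR index in ECX and the new 64-bit value in
	 * EDX:EAX; executing it is what triggers the SVM_EXIT_XSETBV
	 * intercept added by this patch.
	 */
	#include <stdint.h>

	#define XCR0_X87	(1ULL << 0)	/* x87 state, must always be set */
	#define XCR0_SSE	(1ULL << 1)	/* XMM state, required for AVX */
	#define XCR0_AVX	(1ULL << 2)	/* YMM state */

	static inline void xsetbv(uint32_t index, uint64_t value)
	{
		uint32_t lo = (uint32_t)value;
		uint32_t hi = (uint32_t)(value >> 32);

		asm volatile("xsetbv" : : "c"(index), "a"(lo), "d"(hi));
	}

	static void enable_avx_state(void)
	{
		/* CR4.OSXSAVE must already be set, or xsetbv raises #UD. */
		xsetbv(0, XCR0_X87 | XCR0_SSE | XCR0_AVX);
	}

With the intercept added below, this write exits to KVM instead of
taking effect directly, so the host can validate the requested feature
bits before they reach hardware.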

The patch was tested by using AVX in the guest and host in
parallel and checking for data corruption. The KVM xsave
unit tests were also run and all of them pass.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Joerg Roedel authored 14 years ago · commit 81dd35d42c

2 changed files with 18 additions and 4 deletions:
  1. arch/x86/include/asm/svm.h (+2, -0)
  2. arch/x86/kvm/svm.c (+16, -4)

--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -47,6 +47,7 @@ enum {
 	INTERCEPT_MONITOR,
 	INTERCEPT_MWAIT,
 	INTERCEPT_MWAIT_COND,
+	INTERCEPT_XSETBV,
 };
 
 
@@ -329,6 +330,7 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXIT_MONITOR	0x08a
 #define SVM_EXIT_MWAIT		0x08b
 #define SVM_EXIT_MWAIT_COND	0x08c
+#define SVM_EXIT_XSETBV		0x08d
 #define SVM_EXIT_NPF  		0x400
 
 #define SVM_EXIT_ERR		-1

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -935,6 +935,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	set_intercept(svm, INTERCEPT_WBINVD);
 	set_intercept(svm, INTERCEPT_MONITOR);
 	set_intercept(svm, INTERCEPT_MWAIT);
+	set_intercept(svm, INTERCEPT_XSETBV);
 
 	control->iopm_base_pa = iopm_base;
 	control->msrpm_base_pa = __pa(svm->msrpm);
@@ -2546,6 +2547,19 @@ static int skinit_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
+static int xsetbv_interception(struct vcpu_svm *svm)
+{
+	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
+	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+
+	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
+		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+		skip_emulated_instruction(&svm->vcpu);
+	}
+
+	return 1;
+}
+
 static int invalid_op_interception(struct vcpu_svm *svm)
 {
 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
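
The handler above mirrors the instruction's own operand layout: ECX
selects the extended control register and EDX:EAX holds the new value.
It hands both to kvm_set_xcr() and, only if the write is accepted,
advances past the instruction; xsetbv has a fixed three-byte encoding,
hence next_rip + 3 before skip_emulated_instruction(). The validation
itself lives in the common x86 code (kvm_set_xcr() in
arch/x86/kvm/x86.c); the sketch below is a simplified illustration of
the architectural rules such a write has to satisfy, with a
hypothetical helper name, not KVM's actual implementation.

	/* Illustrative only: architectural constraints on an XCR0 write. */
	#include <stdbool.h>
	#include <stdint.h>

	#define XCR0_X87	(1ULL << 0)
	#define XCR0_SSE	(1ULL << 1)
	#define XCR0_AVX	(1ULL << 2)

	static bool xcr0_write_is_valid(uint32_t index, uint64_t new_xcr0,
					uint64_t host_supported_xcr0)
	{
		if (index != 0)				/* only XCR0 is defined */
			return false;
		if (!(new_xcr0 & XCR0_X87))		/* x87 bit may never be cleared */
			return false;
		if ((new_xcr0 & XCR0_AVX) && !(new_xcr0 & XCR0_SSE))
			return false;			/* AVX state requires SSE state */
		if (new_xcr0 & ~host_supported_xcr0)	/* no bits the host cannot save */
			return false;
		return true;
	}
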
@@ -2971,6 +2985,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_WBINVD]                       = emulate_on_interception,
 	[SVM_EXIT_MONITOR]			= invalid_op_interception,
 	[SVM_EXIT_MWAIT]			= invalid_op_interception,
+	[SVM_EXIT_XSETBV]			= xsetbv_interception,
 	[SVM_EXIT_NPF]				= pf_interception,
 };
 
@@ -3624,10 +3639,6 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
-	case 0x00000001:
-		/* Mask out xsave bit as long as it is not supported by SVM */
-		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
-		break;
 	case 0x80000001:
 		if (nested)
 			entry->ecx |= (1 << 2); /* Set SVM bit */
@@ -3701,6 +3712,7 @@ static const struct trace_print_flags svm_exit_reasons_str[] = {
 	{ SVM_EXIT_WBINVD,			"wbinvd" },
 	{ SVM_EXIT_MONITOR,			"monitor" },
 	{ SVM_EXIT_MWAIT,			"mwait" },
+	{ SVM_EXIT_XSETBV,			"xsetbv" },
 	{ SVM_EXIT_NPF,				"npf" },
 	{ -1, NULL }
 };