
KVM: SVM: Restore unconditional cr0 intercept under npt

Currently we don't intercept cr0 at all when npt is enabled.  This improves
performance but requires us to activate the fpu at all times.

Remove this behaviour in preparation for adding selective cr0 intercepts.

Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
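
Annotation: the key behavioural change sits in handle_exit() (last hunk below): vcpu->arch.cr0 is refreshed from the VMCB only when CR0 writes are not intercepted, since an intercepted write is already handled and kept in sync by KVM. The following is a minimal user-space sketch of that decision, not kernel code: vmcb_model, vcpu_model and sync_cr_on_exit() are illustrative stand-ins, while the intercept_cr_write test and the bit-0 INTERCEPT_CR0_MASK mirror the patch.

#include <stdio.h>

#define INTERCEPT_CR0_MASK (1u << 0)

struct vmcb_model {
	unsigned int intercept_cr_write;  /* bitmask: which CR writes trap to the host */
	unsigned long save_cr0;           /* guest CR0 as last run by hardware */
	unsigned long save_cr3;           /* guest CR3 as last run by hardware */
};

struct vcpu_model {
	unsigned long cr0;                /* KVM's shadow copy of guest CR0 */
	unsigned long cr3;                /* KVM's shadow copy of guest CR3 */
};

/* Models the reworked exit-time sync from handle_exit() in this patch. */
static void sync_cr_on_exit(struct vcpu_model *vcpu,
			    const struct vmcb_model *vmcb, int npt_enabled)
{
	/*
	 * If CR0 writes were not intercepted, the guest may have changed CR0
	 * behind KVM's back, so refresh the shadow copy from the VMCB.
	 * If they were intercepted, the write handler already kept it in sync.
	 */
	if (!(vmcb->intercept_cr_write & INTERCEPT_CR0_MASK))
		vcpu->cr0 = vmcb->save_cr0;

	/* With nested paging the guest owns CR3, so pick that up as well. */
	if (npt_enabled)
		vcpu->cr3 = vmcb->save_cr3;
}

int main(void)
{
	struct vmcb_model vmcb = { .intercept_cr_write = 0,
				   .save_cr0 = 0x80050033UL, .save_cr3 = 0x1000UL };
	struct vcpu_model vcpu = { 0, 0 };

	sync_cr_on_exit(&vcpu, &vmcb, 1);
	printf("shadow cr0=%#lx cr3=%#lx\n", vcpu.cr0, vcpu.cr3);
	return 0;
}

Once CR0 writes can be selectively intercepted, the shadow copy is only ever stale when the guest was allowed to write CR0 directly, which is exactly the case this check covers.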
Avi Kivity authored 15 years ago
Commit 888f9f3e0c
1 changed file with 7 additions and 22 deletions:
    arch/x86/kvm/svm.c

arch/x86/kvm/svm.c (+7, -22)

@@ -643,10 +643,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
 					(1ULL << INTERCEPT_INVLPG));
 		control->intercept_exceptions &= ~(1 << PF_VECTOR);
-		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
-						INTERCEPT_CR3_MASK);
-		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
-						 INTERCEPT_CR3_MASK);
+		control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
+		control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
 		save->g_pat = 0x0007040600070406ULL;
 		save->cr3 = 0;
 		save->cr4 = 0;
@@ -982,15 +980,13 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		}
 	}
 #endif
-	if (npt_enabled)
-		goto set;
-
 	vcpu->arch.cr0 = cr0;
-	cr0 |= X86_CR0_PG | X86_CR0_WP;
+
+	if (!npt_enabled)
+		cr0 |= X86_CR0_PG | X86_CR0_WP;
 
 	if (!vcpu->fpu_active)
 		cr0 |= X86_CR0_TS;
-set:
 	/*
 	 * re-enable caching here because the QEMU bios
 	 * does not do it - this results in some delay at
@@ -2386,21 +2382,10 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
 	svm_complete_interrupts(svm);
 
-	if (npt_enabled) {
-		int mmu_reload = 0;
-		if ((kvm_read_cr0_bits(vcpu, X86_CR0_PG) ^ svm->vmcb->save.cr0)
-		    & X86_CR0_PG) {
-			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
-			mmu_reload = 1;
-		}
+	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
+	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
-		if (mmu_reload) {
-			kvm_mmu_reset_context(vcpu);
-			kvm_mmu_load(vcpu);
-		}
-	}
-
 
 	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
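
Annotation on the svm_set_cr0() hunk: after this patch the hardware CR0 is derived from the guest-visible value by forcing PG and WP whenever nested paging is off (the guest then really runs on KVM's shadow page tables) and forcing TS whenever the guest FPU is not loaded, so the first FPU use traps. A small standalone sketch of that derivation, under the assumption that hw_cr0() is an illustrative helper rather than a kernel function (the real svm_set_cr0() also deals with long mode and cache-control bits); the X86_CR0_* defines follow the architectural bit positions.

#include <stdio.h>

#define X86_CR0_PE (1UL << 0)
#define X86_CR0_TS (1UL << 3)
#define X86_CR0_WP (1UL << 16)
#define X86_CR0_PG (1UL << 31)

/* Derives the CR0 value the hardware runs with from the guest-visible CR0. */
static unsigned long hw_cr0(unsigned long guest_cr0, int npt_enabled,
			    int fpu_active)
{
	unsigned long cr0 = guest_cr0;

	/*
	 * Without nested paging the guest runs on KVM's shadow page tables,
	 * so paging and write protection must stay on in hardware even when
	 * the guest believes they are off.
	 */
	if (!npt_enabled)
		cr0 |= X86_CR0_PG | X86_CR0_WP;

	/* Set TS so the first FPU use of an unloaded guest FPU traps. */
	if (!fpu_active)
		cr0 |= X86_CR0_TS;

	return cr0;
}

int main(void)
{
	/* Protected-mode guest, shadow paging, FPU not yet loaded. */
	printf("hw cr0=%#lx\n", hw_cr0(X86_CR0_PE, 0, 0));
	return 0;
}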