
Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm

* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
  KVM: MMU: avoid creation of unreachable pages in the shadow
  KVM: ppc: stop leaking host memory on VM exit
  KVM: MMU: fix sync of ptes addressed at owner pagetable
  KVM: ia64: Fix: Use correct calling convention for PAL_VPS_RESUME_HANDLER
  KVM: ia64: Fix incorrect kbuild CFLAGS override
  KVM: VMX: Fix interrupt loss during race with NMI
  KVM: s390: Fix problem state handling in guest sigp handler
Linus Torvalds 16 years ago
parent
commit b7d6266062

+ 1 - 1
arch/ia64/kvm/Makefile

@@ -58,7 +58,7 @@ endif
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
 
-EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
+CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
 kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
 	vtlb.o process.o
 #Add link memcpy and memset to avoid possible structure assignment error
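
(Note, not part of the commit: in kbuild, per-object compiler options are passed through the CFLAGS_<object>.o variable, while EXTRA_CFLAGS without a suffix applies to every object built by the Makefile. A suffixed EXTRA_CFLAGS_vcpu.o is not a variable kbuild recognizes, so the -mfixed-range reservation was being silently dropped. A minimal sketch of the convention, using a hypothetical object foo.o:

	# kbuild per-object flags (hypothetical object foo.o): only foo.o
	# is built with the extra option; a misspelled EXTRA_CFLAGS_foo.o
	# would be an ordinary, unused make variable and have no effect.
	obj-$(CONFIG_FOO) += foo.o
	CFLAGS_foo.o += -mfixed-range=f2-f5,f12-f127

The rename in the hunk above therefore makes the flag actually reach the compiler for vcpu.o.)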

+ 7 - 4
arch/ia64/kvm/optvfault.S

@@ -107,10 +107,10 @@ END(kvm_vps_resume_normal)
 GLOBAL_ENTRY(kvm_vps_resume_handler)
 	movl r30 = PAL_VPS_RESUME_HANDLER
 	;;
-	ld8 r27=[r25]
+	ld8 r26=[r25]
 	shr r17=r17,IA64_ISR_IR_BIT
 	;;
-	dep r27=r17,r27,63,1   // bit 63 of r27 indicate whether enable CFLE
+	dep r26=r17,r26,63,1   // bit 63 of r26 indicate whether enable CFLE
 	mov pr=r23,-2
 	br.sptk.many kvm_vps_entry
 END(kvm_vps_resume_handler)
@@ -894,12 +894,15 @@ ENTRY(kvm_resume_to_guest)
 	;;
 	ld8 r19=[r19]
 	mov b0=r29
-	cmp.ne p6,p7 = r0,r0
+	mov r27=cr.isr
 	;;
-	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT		// p1=vpsr.ic
+	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT		// p7=vpsr.ic
+	shr r27=r27,IA64_ISR_IR_BIT
 	;;
 	(p6) ld8 r26=[r25]
 	(p7) mov b0=r28
+	;;
+	(p6) dep r26=r27,r26,63,1
 	mov pr=r31,-2
 	br.sptk.many b0             // call pal service
 	;;

+ 2 - 0
arch/powerpc/include/asm/kvm_ppc.h

@@ -104,4 +104,6 @@ static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
 	}
 }
 
+extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
+
 #endif /* __POWERPC_KVM_PPC_H__ */

+ 8 - 0
arch/powerpc/kvm/44x_tlb.c

@@ -124,6 +124,14 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 	}
 }
 
+void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_44x_shadow_release(vcpu, i);
+}
+
 void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
 {
     vcpu->arch.shadow_tlb_mod[i] = 1;

+ 1 - 0
arch/powerpc/kvm/powerpc.c

@@ -238,6 +238,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+	kvmppc_core_destroy_mmu(vcpu);
 }
 
 /* Note: clearing MSR[DE] just means that the debug interrupt will not be

+ 5 - 0
arch/s390/kvm/sigp.c

@@ -237,6 +237,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 	u8 order_code;
 	int rc;
 
+	/* sigp in userspace can exit */
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu,
+						   PGM_PRIVILEGED_OPERATION);
+
 	order_code = disp2;
 	if (base2)
 		order_code += vcpu->arch.guest_gprs[base2];

+ 1 - 1
arch/x86/kvm/mmu.c

@@ -1038,13 +1038,13 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	}
 
 	rmap_write_protect(vcpu->kvm, sp->gfn);
+	kvm_unlink_unsync_page(vcpu->kvm, sp);
 	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		return 1;
 	}
 
 	kvm_mmu_flush_tlb(vcpu);
-	kvm_unlink_unsync_page(vcpu->kvm, sp);
 	return 0;
 }
 

+ 1 - 0
arch/x86/kvm/paging_tmpl.h

@@ -331,6 +331,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
 		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
 					  &curr_pte, sizeof(curr_pte));
 		if (r || curr_pte != gw->ptes[level - 2]) {
+			kvm_mmu_put_page(shadow_page, sptep);
 			kvm_release_pfn_clean(sw->pfn);
 			sw->sptep = NULL;
 			return 1;

+ 3 - 1
arch/x86/kvm/vmx.c

@@ -3149,7 +3149,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 
 	if (cpu_has_virtual_nmis()) {
 		if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
-			if (vmx_nmi_enabled(vcpu)) {
+			if (vcpu->arch.interrupt.pending) {
+				enable_nmi_window(vcpu);
+			} else if (vmx_nmi_enabled(vcpu)) {
 				vcpu->arch.nmi_pending = false;
 				vcpu->arch.nmi_injected = true;
 			} else {