
KVM: MMU: Fix SMEP failure during fetch

This patch fixes kvm-unit-tests hanging and the PT_ACCESSED_MASK bit
being set incorrectly in the case of an SMEP fault.  The code updated
'eperm' only after the variable had already been checked, so the
accessed bit could be set on a walk that should have faulted.

Signed-off-by: Yang, Wei <wei.y.yang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
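
In short, this is an ordering bug: the walker tested 'eperm' (to decide
whether to set the guest pte's accessed bit) before the SMEP permission
check had a chance to set it.  A minimal standalone C sketch of the
ordering problem follows; the helper name smep_violation() is a
hypothetical stand-in for the walker's check, not actual KVM code.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the walker's SMEP test: a supervisor-mode
 * instruction fetch from a user-accessible page must fault.
 */
static bool smep_violation(bool fetch_fault, bool user_fault, bool user_page)
{
	return user_page && fetch_fault && !user_fault;
}

int main(void)
{
	bool eperm, accessed;

	/* Buggy order: 'eperm' is tested while it is still false... */
	eperm = false;
	accessed = false;
	if (!eperm && !accessed)
		accessed = true;	/* accessed bit wrongly set */
	/* ...and only afterwards does the SMEP check set it. */
	if (smep_violation(true, false, true))
		eperm = true;
	printf("buggy: eperm=%d accessed=%d\n", eperm, accessed);

	/* Fixed order: run the permission check before testing 'eperm'. */
	eperm = false;
	accessed = false;
	if (smep_violation(true, false, true))
		eperm = true;
	if (!eperm && !accessed)
		accessed = true;	/* skipped, as it should be */
	printf("fixed: eperm=%d accessed=%d\n", eperm, accessed);

	return 0;
}

The actual fix below hoists the SMEP test to before the accessed-bit
update, computing pte_access at that point instead of after the loop.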
 arch/x86/kvm/paging_tmpl.h | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -147,7 +147,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
 	gfn_t table_gfn;
 	unsigned index, pt_access, uninitialized_var(pte_access);
 	gpa_t pte_gpa;
-	bool eperm;
+	bool eperm, last_gpte;
 	int offset;
 	const int write_fault = access & PFERR_WRITE_MASK;
 	const int user_fault  = access & PFERR_USER_MASK;
@@ -221,6 +221,17 @@ retry_walk:
 			eperm = true;
 #endif
 
+		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
+		if (last_gpte) {
+			pte_access = pt_access &
+				     FNAME(gpte_access)(vcpu, pte, true);
+			/* check if the kernel is fetching from user page */
+			if (unlikely(pte_access & PT_USER_MASK) &&
+			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+				if (fetch_fault && !user_fault)
+					eperm = true;
+		}
+
 		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
@@ -238,18 +249,12 @@ retry_walk:
 
 		walker->ptes[walker->level - 1] = pte;
 
-		if (FNAME(is_last_gpte)(walker, vcpu, mmu, pte)) {
+		if (last_gpte) {
 			int lvl = walker->level;
 			gpa_t real_gpa;
 			gfn_t gfn;
 			u32 ac;
 
-			/* check if the kernel is fetching from user page */
-			if (unlikely(pte_access & PT_USER_MASK) &&
-			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
-				if (fetch_fault && !user_fault)
-					eperm = true;
-
 			gfn = gpte_to_gfn_lvl(pte, lvl);
 			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;
 
@@ -295,7 +300,6 @@ retry_walk:
 		walker->ptes[walker->level - 1] = pte;
 	}
 
-	pte_access = pt_access & FNAME(gpte_access)(vcpu, pte, true);
 	walker->pt_access = pt_access;
 	walker->pte_access = pte_access;
 	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",