瀏覽代碼

KVM: MMU: Set the accessed bit on non-speculative shadow ptes

If we populate a shadow pte due to a fault (and not speculatively due to a
pte write) then we can set the accessed bit on it, as we know it will be
set immediately on the next guest instruction.  This saves a read-modify-write
operation.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Avi Kivity 17 年之前
父節點
當前提交
947da53830
共有 2 個文件被更改,包括 7 次插入和 5 次刪除
  1. 5 3
      arch/x86/kvm/mmu.c
  2. 2 2
      arch/x86/kvm/paging_tmpl.h

+ 5 - 3
arch/x86/kvm/mmu.c

@@ -1020,7 +1020,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int largepage, gfn_t gfn,
-			 struct page *page)
+			 struct page *page, bool speculative)
 {
 	u64 spte;
 	int was_rmapped = 0;
@@ -1061,6 +1061,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	 * demand paging).
 	 */
 	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+	if (!speculative)
+		pte_access |= PT_ACCESSED_MASK;
 	if (!dirty)
 		pte_access &= ~ACC_WRITE_MASK;
 	if (!(pte_access & ACC_EXEC_MASK))
@@ -1148,13 +1150,13 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 
 		if (level == 1) {
 			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-				     0, write, 1, &pt_write, 0, gfn, page);
+				     0, write, 1, &pt_write, 0, gfn, page, false);
 			return pt_write;
 		}
 
 		if (largepage && level == 2) {
 			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-				    0, write, 1, &pt_write, 1, gfn, page);
+				     0, write, 1, &pt_write, 1, gfn, page, false);
 			return pt_write;
 		}
 

+ 2 - 2
arch/x86/kvm/paging_tmpl.h

@@ -266,7 +266,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	get_page(npage);
 	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
 		     gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
-		     npage);
+		     npage, true);
 }
 
 
 /*
@@ -349,7 +349,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
 		     user_fault, write_fault,
 		     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-		     ptwrite, largepage, walker->gfn, page, false);
+		     ptwrite, largepage, walker->gfn, page, false);
 
 	return shadow_ent;
 }