
KVM: MMU: Split gfn_to_rmap() into two functions

rmap_write_protect() calls gfn_to_rmap() for each level with gfn fixed.
This results in calling gfn_to_memslot() repeatedly with that gfn.

This patch introduces __gfn_to_rmap() which takes the slot as an
argument to avoid this.

This is also needed for the following dirty logging optimization.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
Takuya Yoshikawa, 13 years ago
commit 9b9b149236
1 file changed, 17 insertions(+), 9 deletions(-)

arch/x86/kvm/mmu.c (+17 -9)

@@ -958,23 +958,29 @@ static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
 	}
 }
 
-/*
- * Take gfn and return the reverse mapping to it.
- */
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+static unsigned long *__gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level,
+				    struct kvm_memory_slot *slot)
 {
-	struct kvm_memory_slot *slot;
 	struct kvm_lpage_info *linfo;
 
-	slot = gfn_to_memslot(kvm, gfn);
 	if (likely(level == PT_PAGE_TABLE_LEVEL))
 		return &slot->rmap[gfn - slot->base_gfn];
 
 	linfo = lpage_info_slot(gfn, slot, level);
-
 	return &linfo->rmap_pde;
 }
 
+/*
+ * Take gfn and return the reverse mapping to it.
+ */
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
+{
+	struct kvm_memory_slot *slot;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	return __gfn_to_rmap(kvm, gfn, level, slot);
+}
+
 static bool rmap_can_add(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *cache;
@@ -1019,12 +1025,14 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 
 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
+	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	u64 *spte;
 	int i, write_protected = 0;
 
-	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
+	slot = gfn_to_memslot(kvm, gfn);
 
+	rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot);
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
@@ -1039,7 +1047,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	/* check for huge page mappings */
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		rmapp = gfn_to_rmap(kvm, gfn, i);
+		rmapp = __gfn_to_rmap(kvm, gfn, i, slot);
 		spte = rmap_next(kvm, rmapp, NULL);
 		while (spte) {
 			BUG_ON(!(*spte & PT_PRESENT_MASK));
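
For readers skimming the diff, here is a minimal user-space sketch of the call-pattern change the patch makes: the slot lookup is hoisted out of the per-level loop and the resolved slot is passed down instead. The types and helpers below (struct memslot, slot_lookup, rmap_for, the lookup counter) are stand-ins invented for illustration, not kernel APIs.

/*
 * Standalone sketch of the pattern applied by this commit
 * (simplified stand-ins for the KVM types; not the kernel code).
 */
#include <stdio.h>

#define NR_LEVELS 3

struct memslot { unsigned long base_gfn; };

static int lookups;	/* counts how often the slot lookup runs */

/* stand-in for gfn_to_memslot(): the lookup we want to avoid repeating */
static struct memslot *slot_lookup(unsigned long gfn)
{
	static struct memslot slot = { .base_gfn = 0 };

	lookups++;
	return &slot;
}

/* stand-in for __gfn_to_rmap(): takes the already-resolved slot */
static unsigned long rmap_for(unsigned long gfn, int level,
			      struct memslot *slot)
{
	return (gfn - slot->base_gfn) << level;	/* dummy rmap value */
}

int main(void)
{
	unsigned long gfn = 42;
	struct memslot *slot;
	int level;

	/* old shape: the lookup is buried inside the per-level helper */
	lookups = 0;
	for (level = 0; level < NR_LEVELS; level++)
		rmap_for(gfn, level, slot_lookup(gfn));
	printf("old: %d lookups\n", lookups);	/* prints 3 */

	/* new shape: resolve the slot once, reuse it for every level */
	lookups = 0;
	slot = slot_lookup(gfn);
	for (level = 0; level < NR_LEVELS; level++)
		rmap_for(gfn, level, slot);
	printf("new: %d lookups\n", lookups);	/* prints 1 */

	return 0;
}

Since gfn is fixed across the levels that rmap_write_protect() walks, the memslot it resolves to cannot change mid-loop, which is what makes hoisting the lookup safe here.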