@@ -487,6 +487,38 @@ out_gpte_changed:
 	return 0;
 }
 
+/*
+ * Check whether the mapped gfn can write its own page table in the
+ * current mapping.
+ *
+ * This is a helper for FNAME(page_fault). When the guest uses a large
+ * page to map a writable gfn that is itself in use as a page table,
+ * force KVM to map it with small pages: the new shadow page created
+ * when KVM shadows the guest page table would break the large mapping
+ * anyway. Doing it early avoids unnecessary #PFs and emulation.
+ *
+ * Note: the PDPT is not checked for PAE 32-bit guests. That is fine
+ * because the PDPT is always shadowed, so a gfn used as a PDPT can
+ * never be mapped with a large page.
+ */
+static bool
+FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
+			      struct guest_walker *walker, int user_fault)
+{
+	int level;
+	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
+
+	if (!(walker->pte_access & ACC_WRITE_MASK ||
+	      (!is_write_protection(vcpu) && !user_fault)))
+		return false;
+
+	for (level = walker->level; level <= walker->max_level; level++)
+		if (!((walker->gfn ^ walker->table_gfn[level - 1]) & mask))
+			return true;
+
+	return false;
+}
+
 /*
  * Page fault handler. There are several causes for a page fault:
  * - there is no shadow pte for the guest pte
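As a self-contained illustration of the check above, the sketch below re-implements the same writability gate and overlap test as plain userspace C. Everything in it (the walker struct, PAGES_PER_HPAGE, the value of ACC_WRITE_MASK, all names) is a stand-in of mine, not KVM's definitions; in the kernel the mask comes from KVM_PAGES_PER_HPAGE(walker->level) and the fields from the real guest_walker.

	/*
	 * Minimal userspace sketch, assuming 4 KiB base pages and one
	 * 2 MiB large-page size (512 base frames). Illustrative only.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t gfn_t;

	#define PAGES_PER_HPAGE	512ULL		/* 2 MiB / 4 KiB */
	#define ACC_WRITE_MASK	(1u << 1)	/* illustrative value */

	struct walker {
		gfn_t gfn;		/* gfn being faulted in */
		gfn_t table_gfn[4];	/* page-table frames of the walk */
		int level, max_level;
		unsigned int pte_access;
	};

	static bool is_self_change_mapping(const struct walker *w,
					   bool cr0_wp, bool user_fault)
	{
		gfn_t mask = ~(PAGES_PER_HPAGE - 1);
		int level;

		/*
		 * The gfn only endangers its own mapping if the guest can
		 * write it: either the walked ptes grant write access, or
		 * the access is a supervisor write with CR0.WP clear.
		 */
		if (!((w->pte_access & ACC_WRITE_MASK) ||
		      (!cr0_wp && !user_fault)))
			return false;

		/*
		 * XOR cancels the bits two gfns share; masking off the
		 * offset bits inside a large page leaves zero exactly when
		 * the mapped gfn and a page-table gfn sit in the same
		 * large-page frame.
		 */
		for (level = w->level; level <= w->max_level; level++)
			if (!((w->gfn ^ w->table_gfn[level - 1]) & mask))
				return true;

		return false;
	}

	int main(void)
	{
		struct walker w = {
			.gfn = 0x10ff,	/* inside frame 0x1000-0x11ff */
			.table_gfn = { 0x1050, 0x30000 },
			.level = 1, .max_level = 2,
			.pte_access = ACC_WRITE_MASK,
		};

		/* table gfn 0x1050 shares frame 0x1000-0x11ff: prints 1 */
		printf("%d\n", is_self_change_mapping(&w, true, true));

		/* frame 0x1200-0x13ff overlaps no table frame: prints 0 */
		w.gfn = 0x1234;
		printf("%d\n", is_self_change_mapping(&w, true, true));
		return 0;
	}

Both calls print the expected values because XOR-then-mask compares only the bits that select the large-page frame.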
@@ -541,7 +573,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	}
 
 	if (walker.level >= PT_DIRECTORY_LEVEL)
-		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
+		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
+		   || FNAME(is_self_change_mapping)(vcpu, &walker, user_fault);
 	else
 		force_pt_level = 1;
 	if (!force_pt_level) {
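A note on the ordering at the call site: since C's || operator short-circuits, FNAME(is_self_change_mapping) and its walk over table_gfn[] only run when the dirty-log check has not already forced 4 KiB pages. A trivial sketch, using stand-in predicates of mine rather than KVM's symbols:

	#include <stdbool.h>

	/* Stand-ins for mapping_level_dirty_bitmap() and
	 * FNAME(is_self_change_mapping); illustrative only. */
	static bool dirty_log_forces_4k(void)  { return false; }
	static bool self_maps_page_table(void) { return true; }

	static bool force_pt_level(void)
	{
		/* The right-hand predicate never runs when the left
		 * one is already true. */
		return dirty_log_forces_4k() || self_maps_page_table();
	}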