@@ -3551,7 +3551,7 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
  * If we're seeing too many writes to a page, it may no longer be a page table,
  * or we may be forking, in which case it is better to unmap the page.
  */
-static bool detect_write_flooding(struct kvm_mmu_page *sp, u64 *spte)
+static bool detect_write_flooding(struct kvm_mmu_page *sp)
 {
 	/*
 	 * Skip write-flooding detected for the sp whose level is 1, because
@@ -3660,10 +3660,8 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
-		spte = get_written_sptes(sp, gpa, &npte);
-
 		if (detect_write_misaligned(sp, gpa, bytes) ||
-		    detect_write_flooding(sp, spte)) {
+		    detect_write_flooding(sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
 							       &invalid_list);
 			++vcpu->kvm->stat.mmu_flooded;
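
For context, a minimal user-space sketch of the heuristic the simplified signature suggests: instead of comparing the written spte against previously recorded write targets, each shadow page can carry its own write counter and be unmapped once too many emulated writes hit it. The struct layout, field names, and the threshold of 3 below are illustrative assumptions, not taken verbatim from this patch.

/*
 * Sketch only: models the per-page write-flooding heuristic implied by
 * detect_write_flooding(sp) taking nothing but the shadow page itself.
 * "mmu_page", "write_flooding_count", and the threshold are assumed names
 * for illustration, not the kernel's actual definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct mmu_page {
	int level;                /* shadow page level; 1 = last level      */
	int write_flooding_count; /* emulated writes seen against this page */
};

static bool detect_write_flooding(struct mmu_page *sp)
{
	/* Last-level pages can be unsynced instead of zapped, so skip them. */
	if (sp->level == 1)
		return false;

	/* Report flooding once the page has absorbed a few emulated writes. */
	return ++sp->write_flooding_count >= 3;
}

int main(void)
{
	struct mmu_page sp = { .level = 2, .write_flooding_count = 0 };

	/* The third write trips the detector and the page would be zapped. */
	for (int i = 1; i <= 4; i++)
		printf("write %d: flooding=%d\n", i, detect_write_flooding(&sp));
	return 0;
}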