@@ -1122,8 +1122,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		else
 			kvm_release_pfn_clean(pfn);
 	}
-	if (!ptwrite || !*ptwrite)
+	if (speculative) {
 		vcpu->arch.last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_gfn = gfn;
+	}
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
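The first hunk changes when mmu_set_spte() records a "last updated" shadow pte. Previously it keyed on !ptwrite (the emulated write did not itself hit a guest page table); now it records only speculative instantiations, and additionally remembers which guest frame number that spte shadows. The new last_pte_gfn field sits next to last_pte_updated in struct kvm_vcpu_arch; a minimal sketch of the two tracking fields, with the surrounding members elided and the exact header layout assumed rather than taken from this excerpt:

/* Sketch: per-vcpu state consulted by the write-flood heuristic
 * (struct kvm_vcpu_arch, include/asm-x86/kvm_host.h in this era;
 * only the relevant members are shown). */
struct kvm_vcpu_arch {
	/* ... */
	u64 *last_pte_updated;	/* last speculatively instantiated spte */
	gfn_t last_pte_gfn;	/* guest frame that spte shadows (new) */
	/* ... */
};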
@@ -1671,6 +1673,18 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	vcpu->arch.update_pte.pfn = pfn;
 }
 
+static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u64 *spte = vcpu->arch.last_pte_updated;
+
+	if (spte
+	    && vcpu->arch.last_pte_gfn == gfn
+	    && shadow_accessed_mask
+	    && !(*spte & shadow_accessed_mask)
+	    && is_shadow_present_pte(*spte))
+		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
+}
+
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
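The new kvm_mmu_access_page() helper manually sets the accessed bit on the last speculatively instantiated spte when the guest writes to the very gfn that spte shadows. The guard conditions matter: shadow_accessed_mask is zero when the hardware provides no accessed bit (EPT of this era), in which case accessed information is meaningless and the helper does nothing; the !(*spte & shadow_accessed_mask) test skips the atomic set_bit() when the bit is already set; and is_shadow_present_pte() avoids touching a non-present spte. Without this, a guest pte that points at a page table and is only ever reached through emulated writes never has its accessed bit set by hardware, so the flood detector would wrongly conclude the shadow page is unused and zap it. For context, the consumer of this state is the flood check in kvm_mmu_pte_write(); quoted approximately from mmu.c of this era, so treat it as a sketch rather than the exact source:

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	/* Accessed bit seen => the speculative spte was really used. */
	return !!(spte && (*spte & shadow_accessed_mask));
}

	/* ... in kvm_mmu_pte_write(): repeated writes to the same gfn
	 * whose speculative spte was never accessed count as a flood,
	 * and the shadow page is eventually zapped. */
	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
	}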
@@ -1694,6 +1708,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_access_page(vcpu, gfn);
 	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
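The call site is deliberate: kvm_mmu_access_page() runs under mmu_lock and before kvm_mmu_free_some_pages(), so the accessed bit is recorded before any shadow pages can be reclaimed, and set_bit() is atomic, so it cannot corrupt a concurrent hardware update of the same spte word. Note that kvm_mmu_pte_write() derives gfn from gpa earlier in the function (outside this excerpt), which is what makes the comparison against last_pte_gfn meaningful.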