@@ -2697,6 +2697,9 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
 		return;
 	}
 
+	if (is_rsvd_bits_set(vcpu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
+		return;
+
 	++vcpu->kvm->stat.mmu_pte_updated;
 	if (!sp->role.cr4_pae)
 		paging32_update_pte(vcpu, sp, spte, new);
@@ -2775,6 +2778,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       bool guest_initiated)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
+	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
@@ -2849,6 +2853,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		}
 	}
 
+	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
 		pte_size = sp->role.cr4_pae ? 8 : 4;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
@@ -2896,7 +2901,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	while (npte--) {
 		entry = *spte;
 		mmu_pte_write_zap_pte(vcpu, sp, spte);
-		if (gentry)
+		if (gentry &&
+		      !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
+		      & mask.word))
 			mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
 		if (!remote_flush && need_remote_flush(entry, *spte))
 			remote_flush = true;
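
Net effect of the hunks above: mmu_pte_write_new_pte now refuses to instantiate a written guest PTE whose reserved bits are set, and kvm_mmu_pte_write only propagates the new PTE into shadow pages whose role agrees with the vcpu's current base_role on the bits selected by mask (cr0_wp, cr4_pae, nxe); pages built under different paging semantics are only zapped. Below is a minimal standalone sketch of that XOR-and-mask role comparison; the page_role layout and the roles_compatible helper are illustrative assumptions, not the kernel's actual kvm_mmu_page_role definition.

/*
 * Standalone sketch (assumed names and field layout, not the kernel's
 * kvm_mmu_page_role): two roles are treated as compatible when they
 * agree on every bit selected by the mask, checked with one XOR + AND.
 */
#include <stdio.h>

union page_role {			/* illustrative stand-in */
	unsigned int word;
	struct {
		unsigned int level   : 4;
		unsigned int cr4_pae : 1;
		unsigned int cr0_wp  : 1;
		unsigned int nxe     : 1;
	};
};

static int roles_compatible(union page_role a, union page_role b,
			    union page_role mask)
{
	/*
	 * XOR exposes every differing bit; the AND keeps only the
	 * paging-semantics bits we actually care about.
	 */
	return !((a.word ^ b.word) & mask.word);
}

int main(void)
{
	union page_role sp   = { .word = 0 };
	union page_role base = { .word = 0 };
	union page_role mask = { .word = 0 };

	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;

	sp.level = 1;  sp.cr4_pae = 1;
	base.level = 2; base.cr4_pae = 1;
	/* Levels differ, but no masked bit does: still compatible. */
	printf("%d\n", roles_compatible(sp, base, mask));	/* prints 1 */

	base.nxe = 1;
	/* A masked bit now differs: the page would be zapped, not updated. */
	printf("%d\n", roles_compatible(sp, base, mask));	/* prints 0 */
	return 0;
}

Because the AND with mask.word discards unrelated role fields such as level, a single integer expression replaces a field-by-field comparison; that is the same idiom the last hunk adds to the gentry check.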