@@ -395,8 +395,10 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 
 		gpte = gptep[i];
 
-		if (!is_present_gpte(gpte) ||
-		      is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL)) {
+		if (is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL))
+			continue;
+
+		if (!is_present_gpte(gpte)) {
 			if (!sp->unsync)
 				__set_spte(spte, shadow_notrap_nonpresent_pte);
 			continue;
@@ -760,6 +762,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		pt_element_t gpte;
 		gpa_t pte_gpa;
 		gfn_t gfn;
+		bool rsvd_bits_set;
 
 		if (!is_shadow_present_pte(sp->spt[i]))
 			continue;
@@ -771,12 +774,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			return -EINVAL;
 
 		gfn = gpte_to_gfn(gpte);
-		if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)
-		      || gfn != sp->gfns[i] || !is_present_gpte(gpte)
-		      || !(gpte & PT_ACCESSED_MASK)) {
+		rsvd_bits_set = is_rsvd_bits_set(&vcpu->arch.mmu, gpte,
+						 PT_PAGE_TABLE_LEVEL);
+		if (rsvd_bits_set || gfn != sp->gfns[i] ||
+		    !is_present_gpte(gpte) || !(gpte & PT_ACCESSED_MASK)) {
 			u64 nonpresent;
 
-			if (is_present_gpte(gpte) || !clear_unsync)
+			if (rsvd_bits_set || is_present_gpte(gpte) ||
+			    !clear_unsync)
 				nonpresent = shadow_trap_nonpresent_pte;
 			else
 				nonpresent = shadow_notrap_nonpresent_pte;