@@ -461,6 +461,7 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
+	struct kvm_mmu_page *sp;
 	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
@@ -472,10 +473,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		level = iterator.level;
 		sptep = iterator.sptep;
 
+		sp = page_header(__pa(sptep));
 		if (is_last_spte(*sptep, level)) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
 			int offset, shift;
 
+			if (!sp->unsync)
+				break;
+
 			shift = PAGE_SHIFT -
 				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
 			offset = sp->role.quadrant << shift;
@@ -493,7 +497,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 			break;
 		}
 
-		if (!is_shadow_present_pte(*sptep))
+		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
 			break;
 	}
 
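Assembled from the hunks above, the walk in FNAME(invlpg) ends up reading roughly as below. Only the lines visible in the diff are taken verbatim; the for_each_shadow_entry() loop header and the elided bodies (marked "...") are assumed from the surrounding paging_tmpl.h context, so treat this as an orientation sketch rather than the applied source.

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	...
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		/*
		 * Hoisted out of the leaf branch so the loop-exit check
		 * below can also consult sp->unsync_children.
		 */
		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			int offset, shift;

			/* A synced leaf page has nothing stale to fix up. */
			if (!sp->unsync)
				break;

			shift = PAGE_SHIFT -
				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
			offset = sp->role.quadrant << shift;
			...
			break;
		}

		/*
		 * Stop descending as soon as no child shadow page is
		 * unsync: nothing further down can need invalidation.
		 */
		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	...
}

The net effect is two early exits: a leaf whose shadow page is already in sync is skipped outright, and the downward walk stops at the first level whose page has no unsync children, instead of always walking to the leaf.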