@@ -472,14 +472,19 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
 	struct shadow_walker *sw =
 		container_of(_sw, struct shadow_walker, walker);
 
-	if (level == PT_PAGE_TABLE_LEVEL) {
+	/* FIXME: properly handle invlpg on large guest pages */
+	if (level == PT_PAGE_TABLE_LEVEL ||
+	    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
 		struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
 		sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
 		sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-		if (is_shadow_present_pte(*sptep))
+		if (is_shadow_present_pte(*sptep)) {
 			rmap_remove(vcpu->kvm, sptep);
+			if (is_large_pte(*sptep))
+				--vcpu->kvm->stat.lpages;
+		}
 		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
 		return 1;
 	}