@@ -4211,7 +4211,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
-	LIST_HEAD(invalid_list);
 	int batch = 0;
 
 restart:
@@ -4244,7 +4243,8 @@ restart:
 			goto restart;
 		}
 
-		ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+		ret = kvm_mmu_prepare_zap_page(kvm, sp,
+				&kvm->arch.zapped_obsolete_pages);
 		batch += ret;
 
 		if (ret)
@@ -4255,7 +4255,7 @@ restart:
 	 * Should flush tlb before free page tables since lockless-walking
 	 * may use the pages.
 	 */
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
 }
 
 /*
@@ -4306,6 +4306,11 @@ restart:
 	spin_unlock(&kvm->mmu_lock);
 }
 
+static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
+{
+	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
+}
+
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
@@ -4334,15 +4339,23 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		 * want to shrink a VM that only started to populate its MMU
 		 * anyway.
 		 */
-		if (!kvm->arch.n_used_mmu_pages)
+		if (!kvm->arch.n_used_mmu_pages &&
+		    !kvm_has_zapped_obsolete_pages(kvm))
 			continue;
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
 
+		if (kvm_has_zapped_obsolete_pages(kvm)) {
+			kvm_mmu_commit_zap_page(kvm,
+			      &kvm->arch.zapped_obsolete_pages);
+			goto unlock;
+		}
+
 		prepare_zap_oldest_mmu_page(kvm, &invalid_list);
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
+unlock:
 		spin_unlock(&kvm->mmu_lock);
 		srcu_read_unlock(&kvm->srcu, idx);
 