@@ -291,7 +291,6 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	kvm_mmu_free_some_pages(vcpu);
 	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
 				   pte_chain_cache, 4);
 	if (r)
@@ -569,9 +568,6 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 {
 	struct kvm_mmu_page *sp;
 
-	if (!vcpu->kvm->arch.n_free_mmu_pages)
-		return NULL;
-
 	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
 	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
 	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
@@ -1024,6 +1020,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	page = gfn_to_page(vcpu->kvm, gfn);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
 	r = __nonpaging_map(vcpu, v, write, gfn, page);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
@@ -1275,6 +1272,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 	if (r)
 		goto out;
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
 	mmu_alloc_roots(vcpu);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
@@ -1413,6 +1411,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	spin_lock(&vcpu->kvm->mmu_lock);
+	kvm_mmu_free_some_pages(vcpu);
 	++vcpu->kvm->stat.mmu_pte_write;
 	kvm_mmu_audit(vcpu, "pre pte write");
 	if (gfn == vcpu->arch.last_pt_write_gfn
@@ -1505,7 +1504,6 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	spin_lock(&vcpu->kvm->mmu_lock);
 	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
 		struct kvm_mmu_page *sp;
 
@@ -1514,7 +1512,6 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
-	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)