@@ -1625,10 +1625,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 		return -EINVAL;
 
 	down_write(&kvm->slots_lock);
+	spin_lock(&kvm->mmu_lock);
 
 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
 
+	spin_unlock(&kvm->mmu_lock);
 	up_write(&kvm->slots_lock);
 	return 0;
 }
@@ -1804,7 +1806,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
+		spin_lock(&kvm->mmu_lock);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
 		kvm_flush_remote_tlbs(kvm);
 		memslot = &kvm->memslots[log->slot];
 		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -4548,12 +4552,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 		}
 	}
 
+	spin_lock(&kvm->mmu_lock);
 	if (!kvm->arch.n_requested_mmu_pages) {
 		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 	}
 
 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	spin_unlock(&kvm->mmu_lock);
 	kvm_flush_remote_tlbs(kvm);
 
 	return 0;