@@ -6897,7 +6897,13 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
 	spin_lock(&kvm->mmu_lock);
 	if (nr_mmu_pages)
 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
-	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+	/*
+	 * Write protect all pages for dirty logging.
+	 * Existing largepage mappings are destroyed here and new ones will
+	 * not be created until the end of the logging.
+	 */
+	if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+		kvm_mmu_slot_remove_write_access(kvm, mem->slot);
 	spin_unlock(&kvm->mmu_lock);
 	/*
 	 * If memory slot is created, or moved, we need to clear all