|
@@ -375,7 +375,15 @@ int __kvm_set_memory_region(struct kvm *kvm,
|
|
|
memset(new.rmap, 0, npages * sizeof(*new.rmap));
|
|
|
|
|
|
new.user_alloc = user_alloc;
|
|
|
- new.userspace_addr = mem->userspace_addr;
|
|
|
+ /*
|
|
|
+ * hva_to_rmmap() serializes with the mmu_lock and to be
|
|
|
+ * safe it has to ignore memslots with !user_alloc &&
|
|
|
+ * !userspace_addr.
|
|
|
+ */
|
|
|
+ if (user_alloc)
|
|
|
+ new.userspace_addr = mem->userspace_addr;
|
|
|
+ else
|
|
|
+ new.userspace_addr = 0;
|
|
|
}
|
|
|
if (npages && !new.lpage_info) {
|
|
|
int largepages = npages / KVM_PAGES_PER_HPAGE;
|
|
@@ -408,17 +416,21 @@ int __kvm_set_memory_region(struct kvm *kvm,
|
|
|
}
|
|
|
#endif /* not defined CONFIG_S390 */
|
|
|
|
|
|
- if (mem->slot >= kvm->nmemslots)
|
|
|
- kvm->nmemslots = mem->slot + 1;
|
|
|
-
|
|
|
if (!npages)
|
|
|
kvm_arch_flush_shadow(kvm);
|
|
|
|
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
|
+ if (mem->slot >= kvm->nmemslots)
|
|
|
+ kvm->nmemslots = mem->slot + 1;
|
|
|
+
|
|
|
*memslot = new;
|
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
|
|
|
|
r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
|
|
|
if (r) {
|
|
|
+ spin_lock(&kvm->mmu_lock);
|
|
|
*memslot = old;
|
|
|
+ spin_unlock(&kvm->mmu_lock);
|
|
|
goto out_free;
|
|
|
}
|
|
|
|