@@ -728,7 +728,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	int r;
 	gfn_t base_gfn;
 	unsigned long npages;
-	struct kvm_memory_slot *memslot, *slot;
+	struct kvm_memory_slot *slot;
 	struct kvm_memory_slot old, new;
 	struct kvm_memslots *slots = NULL, *old_memslots;
 
@@ -754,7 +754,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
 		goto out;
 
-	memslot = id_to_memslot(kvm->memslots, mem->slot);
+	slot = id_to_memslot(kvm->memslots, mem->slot);
 	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
 	npages = mem->memory_size >> PAGE_SHIFT;
 
@@ -765,7 +765,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (!npages)
 		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
 
-	new = old = *memslot;
+	new = old = *slot;
 
 	new.id = mem->slot;
 	new.base_gfn = base_gfn;
@@ -786,7 +786,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	/* Check for overlaps */
 	r = -EEXIST;
 	kvm_for_each_memslot(slot, kvm->memslots) {
-		if (slot->id >= KVM_USER_MEM_SLOTS || slot == memslot)
+		if ((slot->id >= KVM_USER_MEM_SLOTS) ||
+		    (slot->id == mem->slot))
 			continue;
 		if (!((base_gfn + npages <= slot->base_gfn) ||
 		      (base_gfn >= slot->base_gfn + slot->npages)))
@@ -822,8 +823,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	}
 
 	if (!npages || base_gfn != old.base_gfn) {
-		struct kvm_memory_slot *slot;
-
 		r = -ENOMEM;
 		slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
 				GFP_KERNEL);