@@ -745,8 +745,7 @@ enum kvm_mr_change {
  * Must be called holding mmap_sem for write.
  */
 int __kvm_set_memory_region(struct kvm *kvm,
-			    struct kvm_userspace_memory_region *mem,
-			    bool user_alloc)
+			    struct kvm_userspace_memory_region *mem)
 {
 	int r;
 	gfn_t base_gfn;
@@ -767,7 +766,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
 		goto out;
 	/* We can read the guest memory with __xxx_user() later on. */
-	if (user_alloc &&
+	if ((mem->slot < KVM_USER_MEM_SLOTS) &&
 	    ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
 	     !access_ok(VERIFY_WRITE,
 			(void __user *)(unsigned long)mem->userspace_addr,
@@ -932,26 +931,23 @@ out:
 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
 
 int kvm_set_memory_region(struct kvm *kvm,
-			  struct kvm_userspace_memory_region *mem,
-			  bool user_alloc)
+			  struct kvm_userspace_memory_region *mem)
 {
 	int r;
 
 	mutex_lock(&kvm->slots_lock);
-	r = __kvm_set_memory_region(kvm, mem, user_alloc);
+	r = __kvm_set_memory_region(kvm, mem);
 	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-				   struct
-				   kvm_userspace_memory_region *mem,
-				   bool user_alloc)
+				   struct kvm_userspace_memory_region *mem)
 {
 	if (mem->slot >= KVM_USER_MEM_SLOTS)
 		return -EINVAL;
-	return kvm_set_memory_region(kvm, mem, user_alloc);
+	return kvm_set_memory_region(kvm, mem);
 }
 
 int kvm_get_dirty_log(struct kvm *kvm,
@@ -2198,7 +2194,7 @@ static long kvm_vm_ioctl(struct file *filp,
 				   sizeof kvm_userspace_mem))
 			goto out;
 
-		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, true);
+		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
 		break;
 	}
 	case KVM_GET_DIRTY_LOG: {
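
For context, a minimal userspace sketch (not part of the patch) of the ioctl
path the last hunk services: KVM_SET_USER_MEMORY_REGION lands in
kvm_vm_ioctl_set_memory_region(), which after this change rejects slot ids at
or above KVM_USER_MEM_SLOTS up front. The dropped user_alloc flag is now
implied by the slot id alone, as the new (mem->slot < KVM_USER_MEM_SLOTS)
check in __kvm_set_memory_region() shows. Error handling is elided.

#include <fcntl.h>
#include <linux/kvm.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* Back 64 KiB of guest physical memory with anonymous pages. */
	size_t size = 64 * 1024;
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	struct kvm_userspace_memory_region region = {
		.slot = 0,		/* must be < KVM_USER_MEM_SLOTS */
		.guest_phys_addr = 0x0,
		.memory_size = size,
		.userspace_addr = (unsigned long)mem,	/* page aligned */
	};

	/* Reaches kvm_vm_ioctl_set_memory_region() above; no bool arg. */
	return ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region) ? 1 : 0;
}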