@@ -70,7 +70,7 @@ MODULE_LICENSE("GPL");
  *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_RAW_SPINLOCK(kvm_lock);
+DEFINE_SPINLOCK(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
@@ -491,9 +491,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
 	if (r)
 		goto out_err;
 
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 
 	return kvm;
 
@@ -582,9 +582,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
 	struct mm_struct *mm = kvm->mm;
 
 	kvm_arch_sync_events(kvm);
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_del(&kvm->vm_list);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kvm_io_bus_destroy(kvm->buses[i]);
@@ -3054,10 +3054,10 @@ static int vm_stat_get(void *_offset, u64 *val)
 	struct kvm *kvm;
 
 	*val = 0;
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		*val += *(u32 *)((void *)kvm + offset);
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	return 0;
 }
 
@@ -3071,12 +3071,12 @@ static int vcpu_stat_get(void *_offset, u64 *val)
 	int i;
 
 	*val = 0;
-	raw_spin_lock(&kvm_lock);
+	spin_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
 		kvm_for_each_vcpu(i, vcpu, kvm)
 			*val += *(u32 *)((void *)vcpu + offset);
 
-	raw_spin_unlock(&kvm_lock);
+	spin_unlock(&kvm_lock);
 	return 0;
 }