@@ -138,7 +138,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-	spin_lock(&kvm->requests_lock);
+	raw_spin_lock(&kvm->requests_lock);
 	me = smp_processor_id();
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (test_and_set_bit(req, &vcpu->requests))
@@ -153,7 +153,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 		smp_call_function_many(cpus, ack_flush, NULL, 1);
 	else
 		called = false;
-	spin_unlock(&kvm->requests_lock);
+	raw_spin_unlock(&kvm->requests_lock);
 	free_cpumask_var(cpus);
 	return called;
 }
@@ -409,7 +409,7 @@ static struct kvm *kvm_create_vm(void)
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
 	spin_lock_init(&kvm->mmu_lock);
-	spin_lock_init(&kvm->requests_lock);
+	raw_spin_lock_init(&kvm->requests_lock);
 	kvm_eventfd_init(kvm);
 	mutex_init(&kvm->lock);
 	mutex_init(&kvm->irq_lock);
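
For context, this is the standard conversion pattern when a lock must remain a true spinning lock on PREEMPT_RT, where plain spinlock_t becomes a sleeping lock: the code under requests_lock requires non-preemptibility (smp_processor_id() and the IPIs sent via smp_call_function_many()), which a sleeping lock cannot guarantee. A minimal kernel-style sketch of the pattern follows; struct kvm_like and the example_* helpers are hypothetical stand-ins, assuming the matching field change from spinlock_t to raw_spinlock_t in struct kvm (that header hunk is not shown in this patch):

#include <linux/spinlock.h>

/* Hypothetical stand-in for the struct kvm field touched by this patch:
 * the declaration must change from spinlock_t to raw_spinlock_t so the
 * lock keeps spinning (never sleeps), even on PREEMPT_RT kernels. */
struct kvm_like {
	raw_spinlock_t requests_lock;
};

static void example_init(struct kvm_like *kvm)
{
	/* Mirrors the kvm_create_vm() hunk above. */
	raw_spin_lock_init(&kvm->requests_lock);
}

static void example_critical_section(struct kvm_like *kvm)
{
	/* Mirrors make_all_cpus_request(): raw_spin_lock() disables
	 * preemption unconditionally, so smp_processor_id() stays stable
	 * and IPIs can be sent safely inside the critical section. */
	raw_spin_lock(&kvm->requests_lock);
	/* ... walk vcpus, set request bits, send IPIs ... */
	raw_spin_unlock(&kvm->requests_lock);
}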