@@ -165,11 +165,9 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 	zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-	raw_spin_lock(&kvm->requests_lock);
-	me = smp_processor_id();
+	me = get_cpu();
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (kvm_make_check_request(req, vcpu))
-			continue;
+		kvm_make_request(req, vcpu);
 		cpu = vcpu->cpu;
 
 		/* Set ->requests bit before we read ->mode */
@@ -185,7 +183,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 		smp_call_function_many(cpus, ack_flush, NULL, 1);
 	else
 		called = false;
-	raw_spin_unlock(&kvm->requests_lock);
+	put_cpu();
 	free_cpumask_var(cpus);
 	return called;
 }
@@ -468,7 +466,6 @@ static struct kvm *kvm_create_vm(void)
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
 	spin_lock_init(&kvm->mmu_lock);
-	raw_spin_lock_init(&kvm->requests_lock);
 	kvm_eventfd_init(kvm);
 	mutex_init(&kvm->lock);
 	mutex_init(&kvm->irq_lock);
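
For reference, here is a sketch of how the request-broadcast path reads with this patch applied. kvm_make_request() raises the request with an atomic set_bit, so the per-VM requests_lock is not needed to serialize broadcasters; the remaining requirement on this path is that the task stay on one CPU while `me` is compared against each vcpu->cpu, and get_cpu()/put_cpu(), which disable and re-enable preemption, cover exactly that. Everything outside the hunks above (the variable declarations, the cpumask filtering, and the empty/NULL cpumask handling) is assumed from context rather than taken from this patch and may differ in the tree it applies to.

	/*
	 * Sketch of make_all_cpus_request() after this patch.
	 * Lines marked "assumed" are reconstructed, not part of the diff.
	 */
	static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
	{
		int i, cpu, me;
		cpumask_var_t cpus;
		bool called = true;
		struct kvm_vcpu *vcpu;

		zalloc_cpumask_var(&cpus, GFP_ATOMIC);

		me = get_cpu();		/* disables preemption; me stays valid below */
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_make_request(req, vcpu);	/* atomic set_bit on vcpu->requests */
			cpu = vcpu->cpu;

			/* Set ->requests bit before we read ->mode */
			smp_mb();

			/*
			 * assumed: collect only remote CPUs that currently have a
			 * vcpu loaded (and, per the comment above, that are still
			 * in guest mode) so they get an ack_flush IPI.
			 */
			if (cpus != NULL && cpu != -1 && cpu != me)
				cpumask_set_cpu(cpu, cpus);
		}
		if (cpus != NULL && !cpumask_empty(cpus))	/* assumed guard */
			smp_call_function_many(cpus, ack_flush, NULL, 1);
		else
			called = false;
		put_cpu();		/* re-enable preemption */
		free_cpumask_var(cpus);
		return called;
	}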