@@ -198,21 +198,15 @@ static void vcpu_put(struct kvm_vcpu *vcpu)
 
 static void ack_flush(void *_completed)
 {
-	atomic_t *completed = _completed;
-
-	atomic_inc(completed);
 }
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	int i, cpu, needed;
+	int i, cpu;
 	cpumask_t cpus;
 	struct kvm_vcpu *vcpu;
-	atomic_t completed;
 
-	atomic_set(&completed, 0);
 	cpus_clear(cpus);
-	needed = 0;
 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
 		vcpu = kvm->vcpus[i];
 		if (!vcpu)
@@ -221,23 +215,9 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 			continue;
 		cpu = vcpu->cpu;
 		if (cpu != -1 && cpu != raw_smp_processor_id())
-			if (!cpu_isset(cpu, cpus)) {
-				cpu_set(cpu, cpus);
-				++needed;
-			}
-	}
-
-	/*
-	 * We really want smp_call_function_mask() here.  But that's not
-	 * available, so ipi all cpus in parallel and wait for them
-	 * to complete.
-	 */
-	for (cpu = first_cpu(cpus); cpu != NR_CPUS; cpu = next_cpu(cpu, cpus))
-		smp_call_function_single(cpu, ack_flush, &completed, 1, 0);
-	while (atomic_read(&completed) != needed) {
-		cpu_relax();
-		barrier();
-	}
+			cpu_set(cpu, cpus);
 	}
+	smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
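
For context when reading the removed hunk: as the deleted comment itself notes, the old code open-codes smp_call_function_mask() by hand. It sends an asynchronous IPI to each target cpu with smp_call_function_single(..., wait=0), has the handler bump an atomic completion counter, and spins until the counter reaches the number of cpus signalled. The following is a minimal user-space sketch of that removed pattern, with pthreads standing in for IPIs; NCPUS, ack(), and the thread array are illustrative stand-ins, not kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4			/* illustrative, not a kernel constant */

static atomic_int completed;

/* Stand-in for the pre-patch ack_flush(): acknowledge one "IPI". */
static void *ack(void *unused)
{
	(void)unused;
	atomic_fetch_add(&completed, 1);
	return NULL;
}

int main(void)
{
	pthread_t tid[NCPUS];

	/* "ipi all cpus in parallel": async smp_call_function_single(). */
	for (int i = 0; i < NCPUS; i++)
		pthread_create(&tid[i], NULL, ack, NULL);

	/*
	 * The busy-wait the patch deletes: spin until every handler has
	 * bumped the counter (cpu_relax()/barrier() in the kernel loop).
	 */
	while (atomic_load(&completed) != NCPUS)
		;

	for (int i = 0; i < NCPUS; i++)
		pthread_join(tid[i], NULL);
	printf("acked on all %d cpus\n", NCPUS);
	return 0;
}

The replacement, smp_call_function_mask(cpus, ack_flush, NULL, 1), passes wait=1, so the call only returns once the function has finished on every cpu in the mask. That built-in wait is what makes the hand-rolled spin loop and completion counter unnecessary and lets ack_flush() shrink to an empty body.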