@@ -450,7 +450,7 @@ void smp_call_function_many(const struct cpumask *mask,
 {
 	struct call_function_data *data;
 	unsigned long flags;
-	int cpu, next_cpu, this_cpu = smp_processor_id();
+	int refs, cpu, next_cpu, this_cpu = smp_processor_id();
 
 	/*
 	 * Can deadlock when called with interrupts disabled.
@@ -461,7 +461,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 		     && !oops_in_progress && !early_boot_irqs_disabled);
 
-	/* So, what's a CPU they want? Ignoring this one. */
+	/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);
 	if (cpu == this_cpu)
 		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
@@ -519,6 +519,13 @@ void smp_call_function_many(const struct cpumask *mask,
 	/* We rely on the "and" being processed before the store */
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
+	refs = cpumask_weight(data->cpumask);
+
+	/* Some callers race with other cpus changing the passed mask */
+	if (unlikely(!refs)) {
+		csd_unlock(&data->csd);
+		return;
+	}
 
 	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
@@ -532,7 +539,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	 * to the cpumask before this write to refs, which indicates
 	 * data is on the list and is ready to be processed.
	 */
-	atomic_set(&data->refs, cpumask_weight(data->cpumask));
+	atomic_set(&data->refs, refs);
 	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
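
A standalone userspace sketch of the pattern the new hunk at line 519 adopts: snapshot the caller's mask, compute the reference count once from that snapshot, and bail out before publishing the request when the snapshot has raced to empty. This is not kernel code; toy_call_data, toy_call_many and the other names are hypothetical stand-ins for the real helpers.

/*
 * Minimal illustration of "snapshot, count once, bail out if empty".
 * Builds with gcc/clang; all names are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_call_data {
	uint32_t cpumask;	/* private snapshot of the target CPUs */
	int refs;		/* CPUs still expected to run the callback */
	bool csd_locked;	/* stands in for csd_lock()/csd_unlock() */
};

/* Returns false when the snapshot raced to empty and nothing was queued. */
static bool toy_call_many(struct toy_call_data *data, uint32_t caller_mask,
			  uint32_t online_mask, int this_cpu)
{
	int refs;

	data->csd_locked = true;			/* csd_lock()          */
	data->cpumask = caller_mask & online_mask;	/* cpumask_and()       */
	data->cpumask &= ~(1u << this_cpu);		/* cpumask_clear_cpu() */

	refs = __builtin_popcount(data->cpumask);	/* cpumask_weight()    */
	if (refs == 0) {
		/* The caller's mask changed under us: release the slot and
		 * return instead of queueing work nobody will complete. */
		data->csd_locked = false;		/* csd_unlock()        */
		return false;
	}

	/* ...adding data to the shared queue under the lock goes here... */
	data->refs = refs;				/* atomic_set()        */
	return true;
}

int main(void)
{
	struct toy_call_data data = { 0 };

	/* A mask holding only this CPU collapses to empty after the clear. */
	if (!toy_call_many(&data, 1u << 2, 0xffu, 2))
		puts("empty snapshot: bailed out without queueing");
	return 0;
}

Caching refs also means the value checked for zero and the value later published are guaranteed to be the same count, so the early return and the published reference count can never disagree.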