@@ -265,9 +265,7 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data);
  * @info: An arbitrary pointer to pass to the function.
  * @wait: If true, wait until function has completed on other CPUs.
  *
- * Returns 0 on success, else a negative status code. Note that @wait
- * will be implicitly turned on in case of allocation failures, since
- * we fall back to on-stack allocation.
+ * Returns 0 on success, else a negative status code.
  */
 int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 			     int wait)
@@ -321,6 +319,51 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/*
+ * smp_call_function_any - Run a function on any of the given cpus
+ * @mask: The mask of cpus it can run on.
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait until function has completed.
+ *
+ * Returns 0 on success, else a negative status code (if no cpus were online).
+ * Note that @wait will be implicitly turned on in case of allocation failures,
+ * since we fall back to on-stack allocation.
+ *
+ * Selection preference:
+ *	1) current cpu if in @mask
+ *	2) any cpu of current node if in @mask
+ *	3) any other online cpu in @mask
+ */
+int smp_call_function_any(const struct cpumask *mask,
+			  void (*func)(void *info), void *info, int wait)
+{
+	unsigned int cpu;
+	const struct cpumask *nodemask;
+	int ret;
+
+	/* Try for same CPU (cheapest) */
+	cpu = get_cpu();
+	if (cpumask_test_cpu(cpu, mask))
+		goto call;
+
+	/* Try for same node. */
+	nodemask = cpumask_of_node(cpu);
+	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
+	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
+		if (cpu_online(cpu))
+			goto call;
+	}
+
+	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
+	cpu = cpumask_any_and(mask, cpu_online_mask);
+call:
+	ret = smp_call_function_single(cpu, func, info, wait);
+	put_cpu();
+	return ret;
+}
+EXPORT_SYMBOL_GPL(smp_call_function_any);
+
 /**
  * __smp_call_function_single(): Run a function on another CPU
  * @cpu: The CPU to run on.
@@ -355,9 +398,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
  * @wait: If true, wait (atomically) until function has completed
  *        on other CPUs.
  *
- * If @wait is true, then returns once @func has returned. Note that @wait
- * will be implicitly turned on in case of allocation failures, since
- * we fall back to on-stack allocation.
+ * If @wait is true, then returns once @func has returned.
  *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler. Preemption
@@ -443,8 +484,7 @@ EXPORT_SYMBOL(smp_call_function_many);
  * Returns 0.
  *
  * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func. In case of allocation
- * failure, @wait will be implicitly turned on.
+ * it returns just before the target cpu calls @func.
  *
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
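
For reviewers, a minimal caller sketch of how the new helper might be
used. Everything below except smp_call_function_any() itself is
invented for illustration (the callback, the demo module, the choice
of node 0); it is not part of the patch:

	/*
	 * Illustrative only: my_read_node_id and any_demo are
	 * hypothetical; only smp_call_function_any() comes from the
	 * patch above.
	 */
	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/smp.h>
	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/* Runs in IPI context (or directly on the chosen CPU), so it
	 * must be fast and non-blocking, as the kerneldoc requires. */
	static void my_read_node_id(void *info)
	{
		int *node = info;

		*node = numa_node_id();
	}

	static int __init any_demo_init(void)
	{
		int node = -1;
		int ret;

		/*
		 * Run the callback on any CPU of node 0. The helper
		 * prefers the current CPU, then a CPU on the current
		 * node, then any online CPU in the mask; @wait=1 makes
		 * @node valid on return.
		 */
		ret = smp_call_function_any(cpumask_of_node(0),
					    my_read_node_id, &node, 1);
		pr_info("any_demo: ret=%d node=%d\n", ret, node);
		return ret;
	}
	module_init(any_demo_init);
	MODULE_LICENSE("GPL");

Note that the helper is exported with EXPORT_SYMBOL_GPL, so only
GPL-compatible modules can call it, and that get_cpu()/put_cpu() inside
the helper pin the caller, keeping the "current cpu" preference valid
for the duration of the call.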