@@ -131,148 +131,29 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_idle();
 }
 
-DEFINE_SPINLOCK(smp_call_lock);
-
-struct call_data_struct *call_data;
-
-/*
- * Run a function on all other CPUs.
- *
- *  <mask>	cpuset_t of all processors to run the function on.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]	0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler:
- *
- * CPU A                               CPU B
- * Disable interrupts
- *                                     smp_call_function()
- *                                     Take call_lock
- *                                     Send IPIs
- *                                     Wait for all cpus to acknowledge IPI
- *                                     CPU A has not responded, spin waiting
- *                                     for cpu A to respond, holding call_lock
- * smp_call_function()
- * Spin waiting for call_lock
- * Deadlock                            Deadlock
- */
-int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
-	void *info, int retry, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpu = smp_processor_id();
-	int cpus;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	BUG_ON(!cpu_online(cpu));
-
-	cpu_clear(cpu, mask);
-	cpus = cpus_weight(mask);
-	if (!cpus)
-		return 0;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock(&smp_call_lock);
-	call_data = &data;
-	smp_mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
 	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
-
-	/* Wait for response */
-	/* FIXME: lock-up detection, backtrace on lock-up */
-	while (atomic_read(&data.started) != cpus)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			barrier();
-	call_data = NULL;
-	spin_unlock(&smp_call_lock);
-
-	return 0;
 }
 
-int smp_call_function(void (*func) (void *info), void *info, int retry,
-	int wait)
+/*
+ * We reuse the same vector for the single IPI
+ */
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_mask(cpu_online_map, func, info, retry, wait);
+	mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
 }
-EXPORT_SYMBOL(smp_call_function);
 
+/*
+ * Call into both interrupt handlers, as we share the IPI for them
+ */
 void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function.
-	 */
-	smp_mb();
-	atomic_inc(&call_data->started);
-
-	/*
-	 * At this point the info structure may be out of scope unless wait==1.
-	 */
 	irq_enter();
-	(*func)(info);
+	generic_smp_call_function_single_interrupt();
+	generic_smp_call_function_interrupt();
 	irq_exit();
-
-	if (wait) {
-		smp_mb();
-		atomic_inc(&call_data->finished);
-	}
-}
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-	int retry, int wait)
-{
-	int ret, me;
-
-	/*
-	 * Can die spectacularly if this CPU isn't yet marked online
-	 */
-	if (!cpu_online(cpu))
-		return 0;
-
-	me = get_cpu();
-	BUG_ON(!cpu_online(me));
-
-	if (cpu == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry,
-				     wait);
-
-	put_cpu();
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
 
 static void stop_this_cpu(void *dummy)
 {
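
For context, and not part of the patch itself: after this conversion the MIPS code supplies only the two arch hooks above plus the shared IPI handler, while the queueing, locking, and waiting that the removed code did by hand now live in the generic helpers in kernel/smp.c. Below is a minimal sketch of a caller. It assumes the generic smp_call_function_single(cpu, func, info, wait) signature (a companion patch in the same series drops the unused retry argument), and remote_tick()/example_caller() are made-up names for illustration, not anything added by this patch.

#include <linux/smp.h>
#include <asm/atomic.h>	/* <linux/atomic.h> in later trees */

/*
 * Runs on the target CPU in IPI (interrupt) context, reached via
 * smp_call_function_interrupt() ->
 * generic_smp_call_function_single_interrupt(); it must be fast
 * and must not sleep.
 */
static void remote_tick(void *info)
{
	atomic_inc((atomic_t *)info);
}

static void example_caller(int target_cpu)
{
	atomic_t count = ATOMIC_INIT(0);

	/*
	 * The generic helper queues the request and then invokes
	 * arch_send_call_function_single_ipi(target_cpu), which this
	 * patch implements as mp_ops->send_ipi_mask(cpumask_of_cpu(cpu),
	 * SMP_CALL_FUNCTION). wait=1 blocks until remote_tick() has
	 * finished on the target CPU, so &count may safely live on
	 * this stack frame.
	 */
	smp_call_function_single(target_cpu, remote_tick, &count, 1);
}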