@@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops;
 
 static volatile unsigned int cpu_callin_map[NR_CPUS];
 
-void smp_call_function_interrupt(void);
-
 int smt_enabled_at_boot = 1;
 
-static int ipi_fail_ok;
-
 static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
 
 #ifdef CONFIG_PPC64
@@ -99,12 +95,15 @@ void smp_message_recv(int msg)
 {
        switch(msg) {
        case PPC_MSG_CALL_FUNCTION:
-               smp_call_function_interrupt();
+               generic_smp_call_function_interrupt();
                break;
        case PPC_MSG_RESCHEDULE:
                /* XXX Do we have to do this? */
                set_need_resched();
                break;
+       case PPC_MSG_CALL_FUNC_SINGLE:
+               generic_smp_call_function_single_interrupt();
+               break;
        case PPC_MSG_DEBUGGER_BREAK:
                if (crash_ipi_function_ptr) {
                        crash_ipi_function_ptr(get_irq_regs());
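Note (illustrative, not part of the patch): the rewired PPC_MSG_CALL_FUNCTION case and the new PPC_MSG_CALL_FUNC_SINGLE case simply dispatch into the generic handlers in kernel/smp.c. A hedged sketch of how the message numbers are assumed to be laid out in include/asm-powerpc/smp.h; the actual values are defined there, not in this hunk:

/* Assumed layout, for illustration only */
#define PPC_MSG_CALL_FUNCTION          0
#define PPC_MSG_RESCHEDULE             1
#define PPC_MSG_CALL_FUNC_SINGLE       2       /* assumed to take the previously unused slot */
#define PPC_MSG_DEBUGGER_BREAK         3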
@@ -128,6 +127,19 @@ void smp_send_reschedule(int cpu)
                smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
 
+void arch_send_call_function_single_ipi(int cpu)
+{
+       smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi(cpumask_t mask)
+{
+       unsigned int cpu;
+
+       for_each_cpu_mask(cpu, mask)
+               smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
+}
+
 #ifdef CONFIG_DEBUGGER
 void smp_send_debugger_break(int cpu)
 {
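Note (illustrative, not part of the patch): arch_send_call_function_single_ipi() and arch_send_call_function_ipi() are the two hooks the generic kernel/smp.c helpers use to raise cross-call IPIs; the receiving CPU then re-enters the generic code through smp_message_recv() above. A minimal hedged sketch of a caller going through the generic API this patch switches to. Names other than smp_call_function_single() are made up for illustration, and the five-argument signature (with the then-current retry argument) matches the smp_call_function(..., 0, 0) call used later in this patch:

/* Illustrative only: runs on the target CPU in interrupt context, so keep it short. */
static void example_remote_func(void *info)
{
        unsigned int *hits = info;

        (*hits)++;
}

static int example_cross_call(int target_cpu)
{
        unsigned int hits = 0;

        /*
         * The request is queued by the generic code, delivered to the target
         * as PPC_MSG_CALL_FUNC_SINGLE via arch_send_call_function_single_ipi(),
         * and executed from generic_smp_call_function_single_interrupt().
         * Arguments: cpu, func, info, retry (unused), wait.
         */
        return smp_call_function_single(target_cpu, example_remote_func, &hits, 0, 1);
}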
@@ -154,215 +166,9 @@ static void stop_this_cpu(void *dummy)
                ;
 }
 
-/*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- * Stolen from the i386 version.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);
-
-static struct call_data_struct {
-       void (*func) (void *info);
-       void *info;
-       atomic_t started;
-       atomic_t finished;
-       int wait;
-} *call_data;
-
-/* delay of at least 8 seconds */
-#define SMP_CALL_TIMEOUT       8
-
-/*
- * These functions send a 'generic call function' IPI to other online
- * CPUS in the system.
- *
- * [SUMMARY] Run a function on other CPUs.
- * <func> The function to run. This must be fast and non-blocking.
- * <info> An arbitrary pointer to pass to the function.
- * <nonatomic> currently unused.
- * <wait> If true, wait (atomically) until function has completed on other CPUs.
- * [RETURNS] 0 on success, else a negative status code. Does not return until
- * remote CPUs are nearly ready to execute <<func>> or are or have executed.
- * <map> is a cpu map of the cpus to send IPI to.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-static int __smp_call_function_map(void (*func) (void *info), void *info,
-                                  int nonatomic, int wait, cpumask_t map)
-{
-       struct call_data_struct data;
-       int ret = -1, num_cpus;
-       int cpu;
-       u64 timeout;
-
-       if (unlikely(smp_ops == NULL))
-               return ret;
-
-       data.func = func;
-       data.info = info;
-       atomic_set(&data.started, 0);
-       data.wait = wait;
-       if (wait)
-               atomic_set(&data.finished, 0);
-
-       /* remove 'self' from the map */
-       if (cpu_isset(smp_processor_id(), map))
-               cpu_clear(smp_processor_id(), map);
-
-       /* sanity check the map, remove any non-online processors. */
-       cpus_and(map, map, cpu_online_map);
-
-       num_cpus = cpus_weight(map);
-       if (!num_cpus)
-               goto done;
-
-       call_data = &data;
-       smp_wmb();
-       /* Send a message to all CPUs in the map */
-       for_each_cpu_mask(cpu, map)
-               smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION);
-
-       timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec;
-
-       /* Wait for indication that they have received the message */
-       while (atomic_read(&data.started) != num_cpus) {
-               HMT_low();
-               if (get_tb() >= timeout) {
-                       printk("smp_call_function on cpu %d: other cpus not "
-                              "responding (%d)\n", smp_processor_id(),
-                              atomic_read(&data.started));
-                       if (!ipi_fail_ok)
-                               debugger(NULL);
-                       goto out;
-               }
-       }
-
-       /* optionally wait for the CPUs to complete */
-       if (wait) {
-               while (atomic_read(&data.finished) != num_cpus) {
-                       HMT_low();
-                       if (get_tb() >= timeout) {
-                               printk("smp_call_function on cpu %d: other "
-                                      "cpus not finishing (%d/%d)\n",
-                                      smp_processor_id(),
-                                      atomic_read(&data.finished),
-                                      atomic_read(&data.started));
-                               debugger(NULL);
-                               goto out;
-                       }
-               }
-       }
-
- done:
-       ret = 0;
-
- out:
-       call_data = NULL;
-       HMT_medium();
-       return ret;
-}
-
-static int __smp_call_function(void (*func)(void *info), void *info,
-                              int nonatomic, int wait)
-{
-       int ret;
-       spin_lock(&call_lock);
-       ret =__smp_call_function_map(func, info, nonatomic, wait,
-                                    cpu_online_map);
-       spin_unlock(&call_lock);
-       return ret;
-}
-
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-                       int wait)
-{
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
-
-       return __smp_call_function(func, info, nonatomic, wait);
-}
-EXPORT_SYMBOL(smp_call_function);
-
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
-                       int nonatomic, int wait)
-{
-       cpumask_t map = CPU_MASK_NONE;
-       int ret = 0;
-
-       /* Can deadlock when called with interrupts disabled */
-       WARN_ON(irqs_disabled());
-
-       if (!cpu_online(cpu))
-               return -EINVAL;
-
-       cpu_set(cpu, map);
-       if (cpu != get_cpu()) {
-               spin_lock(&call_lock);
-               ret = __smp_call_function_map(func, info, nonatomic, wait, map);
-               spin_unlock(&call_lock);
-       } else {
-               local_irq_disable();
-               func(info);
-               local_irq_enable();
-       }
-       put_cpu();
-       return ret;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
 void smp_send_stop(void)
 {
-       int nolock;
-
-       /* It's OK to fail sending the IPI, since the alternative is to
-        * be stuck forever waiting on the other CPU to take the interrupt.
-        *
-        * It's better to at least continue and go through reboot, since this
-        * function is usually called at panic or reboot time in the first
-        * place.
-        */
-       ipi_fail_ok = 1;
-
-       /* Don't deadlock in case we got called through panic */
-       nolock = !spin_trylock(&call_lock);
-       __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map);
-       if (!nolock)
-               spin_unlock(&call_lock);
-}
-
-void smp_call_function_interrupt(void)
-{
-       void (*func) (void *info);
-       void *info;
-       int wait;
-
-       /* call_data will be NULL if the sender timed out while
-        * waiting on us to receive the call.
-        */
-       if (!call_data)
-               return;
-
-       func = call_data->func;
-       info = call_data->info;
-       wait = call_data->wait;
-
-       if (!wait)
-               smp_mb__before_atomic_inc();
-
-       /*
-        * Notify initiating CPU that I've grabbed the data and am
-        * about to execute the function
-        */
-       atomic_inc(&call_data->started);
-       /*
-        * At this point the info structure may be out of scope unless wait==1
-        */
-       (*func)(info);
-       if (wait) {
-               smp_mb__before_atomic_inc();
-               atomic_inc(&call_data->finished);
-       }
+       smp_call_function(stop_this_cpu, NULL, 0, 0);
 }
 
 extern struct gettimeofday_struct do_gtod;
@@ -596,9 +402,9 @@ int __devinit start_secondary(void *unused)
 
        secondary_cpu_time_init();
 
-       spin_lock(&call_lock);
+       ipi_call_lock();
        cpu_set(cpu, cpu_online_map);
-       spin_unlock(&call_lock);
+       ipi_call_unlock();
 
        local_irq_enable();
 
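Note (illustrative, not part of the patch): with the arch-private call_lock gone, start_secondary() brackets the cpu_online_map update with ipi_call_lock()/ipi_call_unlock() from the generic code, so that marking the CPU online cannot race with a sender building the target set for a call-function IPI. A hedged sketch of the assumed shape of those helpers; the real definitions live in kernel/smp.c and the lock name below is a guess:

/* Assumed shape only; not the actual kernel/smp.c implementation. */
static DEFINE_SPINLOCK(call_function_lock);    /* guessed name for the lock held by senders */

void ipi_call_lock(void)
{
        spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
        spin_unlock(&call_function_lock);
}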