@@ -3,7 +3,7 @@
  *
  * SMP support for the SuperH processors.
  *
- * Copyright (C) 2002 - 2008 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
  * Copyright (C) 2006 - 2007 Akio Idehara
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -31,7 +31,20 @@
 int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
 int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 
-static inline void __init smp_store_cpu_info(unsigned int cpu)
+struct plat_smp_ops *mp_ops = NULL;
+
+/* State of each CPU */
+DEFINE_PER_CPU(int, cpu_state) = { 0 };
+
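+/* Platform code hooks up its SMP ops here; re-registering replaces them. */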
+void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
+{
+	if (mp_ops)
+		printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+	mp_ops = ops;
+}
+
+static inline void __cpuinit smp_store_cpu_info(unsigned int cpu)
 {
 	struct sh_cpuinfo *c = cpu_data + cpu;
 
@@ -46,14 +59,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = cpu;
-	plat_prepare_cpus(max_cpus);
+	mp_ops->prepare_cpus(max_cpus);
 
 #ifndef CONFIG_HOTPLUG_CPU
 	init_cpu_present(&cpu_possible_map);
 #endif
 }
 
-void __devinit smp_prepare_boot_cpu(void)
+void __init smp_prepare_boot_cpu(void)
 {
 	unsigned int cpu = smp_processor_id();
 
@@ -62,37 +75,137 @@ void __devinit smp_prepare_boot_cpu(void)
 
 	set_cpu_online(cpu, true);
 	set_cpu_possible(cpu, true);
+
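+	/* Record the boot CPU's hotplug state alongside the online map. */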
+	per_cpu(cpu_state, cpu) = CPU_ONLINE;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void native_cpu_die(unsigned int cpu)
+{
+	unsigned int i;
+
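+	/* Give the dying CPU up to one second (10 polls at 100ms) to sign off. */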
+	for (i = 0; i < 10; i++) {
+		smp_rmb();
+		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
+			if (system_state == SYSTEM_RUNNING)
+				pr_info("CPU %u is now offline\n", cpu);
+
+			return;
+		}
+
+		msleep(100);
+	}
+
+	pr_err("CPU %u didn't die...\n", cpu);
+}
+
+int native_cpu_disable(unsigned int cpu)
+{
+	return cpu == 0 ? -EPERM : 0;
+}
+
+void play_dead_common(void)
+{
+	idle_task_exit();
+	irq_ctx_exit(raw_smp_processor_id());
+	mb();
+
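+	/* The mb() above orders the teardown before CPU_DEAD becomes visible. */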
+	__get_cpu_var(cpu_state) = CPU_DEAD;
+	local_irq_disable();
+}
+
+void native_play_dead(void)
+{
+	play_dead_common();
 }
 
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct task_struct *p;
+	int ret;
+
+	ret = mp_ops->cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline.  Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Stop the local timer for this CPU.
+	 */
+	local_timer_stop(cpu);
+
+	/*
+	 * Flush user cache and TLB mappings, and then remove this CPU
+	 * from the vm mask set of all processes.
+	 */
+	flush_cache_all();
+	local_flush_tlb_all();
+
+	read_lock(&tasklist_lock);
+	for_each_process(p)
+		if (p->mm)
+			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
+	read_unlock(&tasklist_lock);
+
+	return 0;
+}
+#else /* ... !CONFIG_HOTPLUG_CPU */
+int native_cpu_disable(unsigned int cpu)
+{
+	return -ENOSYS;
+}
+
+void native_cpu_die(unsigned int cpu)
+{
+	/* We said "no" in __cpu_disable */
+	BUG();
+}
+
+void native_play_dead(void)
+{
+	BUG();
+}
+#endif
+
 asmlinkage void __cpuinit start_secondary(void)
 {
-	unsigned int cpu;
+	unsigned int cpu = smp_processor_id();
 	struct mm_struct *mm = &init_mm;
 
 	enable_mmu();
 	atomic_inc(&mm->mm_count);
 	atomic_inc(&mm->mm_users);
 	current->active_mm = mm;
-	BUG_ON(current->mm);
 	enter_lazy_tlb(mm, current);
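+	/* Start this CPU off with a clean local TLB. */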
+	local_flush_tlb_all();
 
 	per_cpu_trap_init();
 
 	preempt_disable();
 
-	notify_cpu_starting(smp_processor_id());
+	notify_cpu_starting(cpu);
 
 	local_irq_enable();
 
-	cpu = smp_processor_id();
-
 	/* Enable local timers */
 	local_timer_setup(cpu);
 	calibrate_delay();
 
 	smp_store_cpu_info(cpu);
 
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
+	per_cpu(cpu_state, cpu) = CPU_ONLINE;
 
 	cpu_idle();
 }
@@ -111,12 +224,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	struct task_struct *tsk;
 	unsigned long timeout;
 
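+	/* Reuse the cached idle task if this CPU has been brought up before. */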
-	tsk = fork_idle(cpu);
-	if (IS_ERR(tsk)) {
-		printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
-		return PTR_ERR(tsk);
+	tsk = cpu_data[cpu].idle;
+	if (!tsk) {
+		tsk = fork_idle(cpu);
+		if (IS_ERR(tsk)) {
+			pr_err("Failed forking idle task for cpu %d\n", cpu);
+			return PTR_ERR(tsk);
+		}
+
+		cpu_data[cpu].idle = tsk;
 	}
 
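+	/* Flag the CPU as coming up before it is actually kicked. */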
+	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+
 	/* Fill in data in head.S for secondary cpus */
 	stack_start.sp = tsk->thread.sp;
 	stack_start.thread_info = tsk->stack;
@@ -127,7 +247,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 		      (unsigned long)&stack_start + sizeof(stack_start));
 	wmb();
 
-	plat_start_cpu(cpu, (unsigned long)_stext);
+	mp_ops->start_cpu(cpu, (unsigned long)_stext);
 
 	timeout = jiffies + HZ;
 	while (time_before(jiffies, timeout)) {
@@ -135,6 +255,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
 			break;
 
 		udelay(10);
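+		/* Force cpu_online() to be re-evaluated on each pass. */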
+		barrier();
 	}
 
 	if (cpu_online(cpu))
@@ -159,7 +280,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void smp_send_reschedule(int cpu)
 {
-	plat_send_ipi(cpu, SMP_MSG_RESCHEDULE);
+	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
 }
 
 void smp_send_stop(void)
@@ -172,12 +293,12 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 	int cpu;
 
 	for_each_cpu(cpu, mask)
-		plat_send_ipi(cpu, SMP_MSG_FUNCTION);
+		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
+	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -185,7 +306,7 @@ void smp_timer_broadcast(const struct cpumask *mask)
 	int cpu;
 
 	for_each_cpu(cpu, mask)
-		plat_send_ipi(cpu, SMP_MSG_TIMER);
+		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
 }
 
 static void ipi_timer(void)
@@ -249,7 +370,6 @@ static void flush_tlb_mm_ipi(void *mm)
  * behalf of debugees, kswapd stealing pages from another process etc).
  * Kanoj 07/00.
  */
-
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	preempt_disable();