@@ -19,7 +19,6 @@
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
-#include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/percpu.h>
@@ -27,6 +26,7 @@
 #include <linux/completion.h>
 
 #include <linux/atomic.h>
+#include <asm/smp.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
 #include <asm/cputype.h>
@@ -42,6 +42,7 @@
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
 #include <asm/smp_plat.h>
+#include <asm/mach/arch.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -60,6 +61,14 @@ enum ipi_msg_type {
 
 static DECLARE_COMPLETION(cpu_running);
 
+static struct smp_operations smp_ops;
+
+void __init smp_set_ops(struct smp_operations *ops)
+{
+	if (ops)
+		smp_ops = *ops;
+};
+
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -100,9 +109,60 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 	return ret;
 }
 
+/* platform specific SMP operations */
+void __attribute__((weak)) __init smp_init_cpus(void)
+{
+	if (smp_ops.smp_init_cpus)
+		smp_ops.smp_init_cpus();
+}
+
+void __attribute__((weak)) __init platform_smp_prepare_cpus(unsigned int max_cpus)
+{
+	if (smp_ops.smp_prepare_cpus)
+		smp_ops.smp_prepare_cpus(max_cpus);
+}
+
+void __attribute__((weak)) __cpuinit platform_secondary_init(unsigned int cpu)
+{
+	if (smp_ops.smp_secondary_init)
+		smp_ops.smp_secondary_init(cpu);
+}
+
+int __attribute__((weak)) __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	if (smp_ops.smp_boot_secondary)
+		return smp_ops.smp_boot_secondary(cpu, idle);
+	return -ENOSYS;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
+int __attribute__((weak)) platform_cpu_kill(unsigned int cpu)
+{
+	if (smp_ops.cpu_kill)
+		return smp_ops.cpu_kill(cpu);
+	return 1;
+}
+
+void __attribute__((weak)) platform_cpu_die(unsigned int cpu)
+{
+	if (smp_ops.cpu_die)
+		smp_ops.cpu_die(cpu);
+}
+
+int __attribute__((weak)) platform_cpu_disable(unsigned int cpu)
+{
+	if (smp_ops.cpu_disable)
+		return smp_ops.cpu_disable(cpu);
+
+	/*
+	 * By default, allow disabling all CPUs except the first one,
+	 * since this is special on a lot of platforms, e.g. because
+	 * of clock tick interrupts.
+	 */
+	return cpu == 0 ? -EPERM : 0;
+}
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
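
For reference, a minimal sketch of how a platform might hook into the new interface. This is not part of the patch: the myplat_* names and the early init hook are purely illustrative, and the struct smp_operations field names and signatures are inferred from the dispatch code above, assuming the structure is declared in <asm/smp.h> as the new include suggests.

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <asm/smp.h>

/* Illustrative stubs; a real platform would program its boot/release logic here. */
static void __init myplat_smp_init_cpus(void) { }
static void __init myplat_smp_prepare_cpus(unsigned int max_cpus) { }
static void __cpuinit myplat_secondary_init(unsigned int cpu) { }

static int __cpuinit myplat_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/* A real implementation would wake the secondary CPU and return 0. */
	return -ENOSYS;
}

static struct smp_operations myplat_smp_ops __initdata = {
	.smp_init_cpus		= myplat_smp_init_cpus,
	.smp_prepare_cpus	= myplat_smp_prepare_cpus,
	.smp_secondary_init	= myplat_secondary_init,
	.smp_boot_secondary	= myplat_boot_secondary,
};

static void __init myplat_init_smp(void)
{
	/* Must run before the generic SMP code starts probing/booting CPUs. */
	smp_set_ops(&myplat_smp_ops);
}

Because smp_set_ops() copies the structure into the static smp_ops instance, marking the platform's table __initdata should be safe once registration has happened at init time; any callback left NULL simply falls back to the default behaviour in the weak wrappers above.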