@@ -39,6 +39,7 @@
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
+#include <asm/cpu_ops.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -54,7 +55,6 @@
  * where to place its SVC stack
  */
 struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
 
 enum ipi_msg_type {
 	IPI_RESCHEDULE,
@@ -63,61 +63,16 @@ enum ipi_msg_type {
 	IPI_CPU_STOP,
 };
 
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void write_pen_release(u64 val)
-{
-	void *start = (void *)&secondary_holding_pen_release;
-	unsigned long size = sizeof(secondary_holding_pen_release);
-
-	secondary_holding_pen_release = val;
-	__flush_dcache_area(start, size);
-}
-
 /*
  * Boot a secondary CPU, and assign it the specified idle task.
  * This also gives us the initial stack to use for this CPU.
  */
 static int boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	unsigned long timeout;
-
-	/*
-	 * Set synchronisation state between this boot processor
-	 * and the secondary one
-	 */
-	raw_spin_lock(&boot_lock);
-
-	/*
-	 * Update the pen release flag.
-	 */
-	write_pen_release(cpu_logical_map(cpu));
-
-	/*
-	 * Send an event, causing the secondaries to read pen_release.
-	 */
-	sev();
-
-	timeout = jiffies + (1 * HZ);
-	while (time_before(jiffies, timeout)) {
-		if (secondary_holding_pen_release == INVALID_HWID)
-			break;
-		udelay(10);
-	}
-
-	/*
-	 * Now the secondary core is starting up let it run its
-	 * calibrations, then wait for it to finish
-	 */
-	raw_spin_unlock(&boot_lock);
+	if (cpu_ops[cpu]->cpu_boot)
+		return cpu_ops[cpu]->cpu_boot(cpu);
 
-	return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+	return -EOPNOTSUPP;
 }
 
 static DECLARE_COMPLETION(cpu_running);
@@ -187,17 +142,13 @@ asmlinkage void secondary_start_kernel(void)
 	preempt_disable();
 	trace_hardirqs_off();
 
-	/*
-	 * Let the primary processor know we're out of the
-	 * pen, then head off into the C entry point
-	 */
-	write_pen_release(INVALID_HWID);
+	if (cpu_ops[cpu]->cpu_postboot)
+		cpu_ops[cpu]->cpu_postboot();
 
 	/*
-	 * Synchronise with the boot thread.
+	 * Enable GIC and timers.
 	 */
-	raw_spin_lock(&boot_lock);
-	raw_spin_unlock(&boot_lock);
+	notify_cpu_starting(cpu);
 
 	/*
 	 * OK, now it's safe to let the boot CPU continue. Wait for
@@ -207,11 +158,6 @@ asmlinkage void secondary_start_kernel(void)
 	set_cpu_online(cpu, true);
 	complete(&cpu_running);
 
-	/*
-	 * Enable GIC and timers.
-	 */
-	notify_cpu_starting(cpu);
-
 	local_irq_enable();
 	local_fiq_enable();
 
@@ -221,39 +167,113 @@ asmlinkage void secondary_start_kernel(void)
 	cpu_startup_entry(CPUHP_ONLINE);
 }
 
-void __init smp_cpus_done(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
 {
-	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+	/*
+	 * If we don't have a cpu_die method, abort before we reach the point
+	 * of no return. CPU0 may not have an cpu_ops, so test for it.
+	 */
+	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+		return -EOPNOTSUPP;
+
+	/*
+	 * We may need to abort a hot unplug for some other mechanism-specific
+	 * reason.
+	 */
+	if (cpu_ops[cpu]->cpu_disable)
+		return cpu_ops[cpu]->cpu_disable(cpu);
+
+	return 0;
 }
 
-void __init smp_prepare_boot_cpu(void)
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
 {
-}
+	unsigned int cpu = smp_processor_id();
+	int ret;
 
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+	ret = op_cpu_disable(cpu);
+	if (ret)
+		return ret;
 
-static const struct smp_enable_ops *enable_ops[] __initconst = {
-	&smp_spin_table_ops,
-	&smp_psci_ops,
-	NULL,
-};
+	/*
+	 * Take this CPU offline. Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
 
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+	/*
+	 * Remove this CPU from the vm mask set of all processes.
+	 */
+	clear_tasks_mm_cpumask(cpu);
 
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
-{
-	const struct smp_enable_ops **ops = enable_ops;
+	return 0;
+}
 
-	while (*ops) {
-		if (!strcmp(name, (*ops)->name))
-			return *ops;
+static DECLARE_COMPLETION(cpu_died);
 
-		ops++;
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_crit("CPU%u: cpu didn't die\n", cpu);
+		return;
 	}
+	pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	idle_task_exit();
 
-	return NULL;
+	local_irq_disable();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	complete(&cpu_died);
+
+	/*
+	 * Actually shutdown the CPU. This must never fail. The specific hotplug
+	 * mechanism must perform all required cache maintenance to ensure that
+	 * no dirty lines are lost in the process of shutting down the CPU.
+	 */
+	cpu_ops[cpu]->cpu_die(cpu);
+
+	BUG();
+}
+#endif
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
 }
 
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+
 /*
  * Enumerate the possible CPU set from the device tree and build the
  * cpu logical map array containing MPIDR values related to logical
@@ -261,9 +281,8 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
  */
 void __init smp_init_cpus(void)
 {
-	const char *enable_method;
 	struct device_node *dn = NULL;
-	int i, cpu = 1;
+	unsigned int i, cpu = 1;
 	bool bootcpu_valid = false;
 
 	while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -332,25 +351,10 @@ void __init smp_init_cpus(void)
 		if (cpu >= NR_CPUS)
 			goto next;
 
-		/*
-		 * We currently support only the "spin-table" enable-method.
-		 */
-		enable_method = of_get_property(dn, "enable-method", NULL);
-		if (!enable_method) {
-			pr_err("%s: missing enable-method property\n",
-				dn->full_name);
+		if (cpu_read_ops(dn, cpu) != 0)
 			goto next;
-		}
-
-		smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
-
-		if (!smp_enable_ops[cpu]) {
-			pr_err("%s: invalid enable-method property: %s\n",
-				dn->full_name, enable_method);
-			goto next;
-		}
 
-		if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+		if (cpu_ops[cpu]->cpu_init(dn, cpu))
 			goto next;
 
 		pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -380,8 +384,8 @@ next:
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	int cpu, err;
-	unsigned int ncores = num_possible_cpus();
+	int err;
+	unsigned int cpu, ncores = num_possible_cpus();
 
 	/*
 	 * are we trying to boot more cores than exist?
@@ -408,10 +412,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		if (cpu == smp_processor_id())
 			continue;
 
-		if (!smp_enable_ops[cpu])
+		if (!cpu_ops[cpu])
 			continue;
 
-		err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+		err = cpu_ops[cpu]->cpu_prepare(cpu);
 		if (err)
 			continue;
 
@@ -451,7 +455,7 @@ void show_ipi_list(struct seq_file *p, int prec)
 	for (i = 0; i < NR_IPI; i++) {
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
 			   prec >= 4 ? " " : "");
-		for_each_present_cpu(cpu)
+		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ",
 				   __get_irq_stat(cpu, ipi_irqs[i]));
 		seq_printf(p, " %s\n", ipi_types[i]);
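
Note: the cpu_ops[] calls above program against the cpu_operations table declared in the <asm/cpu_ops.h> header this patch starts including. The sketch below is illustrative only, reconstructed from the call sites in this diff rather than copied from the header, so the exact field order and upstream layout may differ:

	/*
	 * Illustrative sketch of the per-CPU boot/hotplug operations table
	 * assumed by the code above (reconstructed from its call sites).
	 * In kernel context this relies on <linux/of.h> for struct device_node
	 * and <linux/threads.h> for NR_CPUS.
	 */
	struct cpu_operations {
		const char	*name;
		int		(*cpu_init)(struct device_node *dn, unsigned int cpu);
		int		(*cpu_prepare)(unsigned int cpu);
		int		(*cpu_boot)(unsigned int cpu);
		void		(*cpu_postboot)(void);
	#ifdef CONFIG_HOTPLUG_CPU
		int		(*cpu_disable)(unsigned int cpu);
		void		(*cpu_die)(unsigned int cpu);
	#endif
	};

	/* One entry per logical CPU, selected from the DT "enable-method". */
	extern const struct cpu_operations *cpu_ops[NR_CPUS];
	extern int __init cpu_read_ops(struct device_node *dn, int cpu);

Each boot mechanism (spin-table, PSCI) supplies one such table; cpu_read_ops() picks it from the device tree, replacing the smp_enable_ops lookup removed above, and the hotplug paths (__cpu_disable, cpu_die) simply dispatch through the optional cpu_disable/cpu_die methods.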