@@ -167,6 +167,102 @@ asmlinkage void secondary_start_kernel(void)
 	cpu_startup_entry(CPUHP_ONLINE);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
+{
+	/*
+	 * If we don't have a cpu_die method, abort before we reach the point
+	 * of no return. CPU0 may not have a cpu_ops, so test for it.
+	 */
+	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+		return -EOPNOTSUPP;
+
+	/*
+	 * We may need to abort a hot unplug for some other mechanism-specific
+	 * reason.
+	 */
+	if (cpu_ops[cpu]->cpu_disable)
+		return cpu_ops[cpu]->cpu_disable(cpu);
+
+	return 0;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+	int ret;
+
+	ret = op_cpu_disable(cpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Take this CPU offline. Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the cpu.
+	 */
+	set_cpu_online(cpu, false);
+
+	/*
+	 * OK - migrate IRQs away from this CPU
+	 */
+	migrate_irqs();
+
+	/*
+	 * Remove this CPU from the vm mask set of all processes.
+	 */
+	clear_tasks_mm_cpumask(cpu);
+
+	return 0;
+}
+
+static DECLARE_COMPLETION(cpu_died);
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_crit("CPU%u: cpu didn't die\n", cpu);
+		return;
+	}
+	pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	idle_task_exit();
+
+	local_irq_disable();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	complete(&cpu_died);
+
+	/*
+	 * Actually shutdown the CPU. This must never fail. The specific hotplug
+	 * mechanism must perform all required cache maintenance to ensure that
+	 * no dirty lines are lost in the process of shutting down the CPU.
+	 */
+	cpu_ops[cpu]->cpu_die(cpu);
+
+	BUG();
+}
+#endif
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
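
For reference, the hunk above relies on two per-mechanism hooks reached through cpu_ops[cpu]: an optional cpu_disable() that may veto the unplug by returning an error, and a mandatory cpu_die() that performs the final power-down and must not return (cpu_die() hits BUG() if it does). The sketch below is illustrative only and is not part of this patch; the function names are invented, and the hook shapes are inferred from how they are called in the hunk rather than quoted from cpu_ops.h.

/* Illustrative sketch only -- hypothetical hotplug backend, not part of the patch. */
static int example_cpu_disable(unsigned int cpu)
{
	/* Veto the unplug for a CPU this mechanism cannot power off. */
	if (cpu == 0)
		return -EPERM;

	return 0;
}

static void example_cpu_die(unsigned int cpu)
{
	/*
	 * Final power-down path: any required cache maintenance would go
	 * here, then the CPU is parked. Control must never return.
	 */
	while (1)
		asm volatile("wfi");
}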