@@ -24,9 +24,11 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
+#include <linux/irq.h>
 
 #include <asm/processor.h>
 #include <asm/machdep.h>
+#include <asm/kexec.h>
 #include <asm/kdump.h>
 #include <asm/lmb.h>
 #include <asm/firmware.h>
@@ -41,6 +43,7 @@
 
 /* This keeps a track of which one is crashing cpu. */
 int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
 
 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
 			    size_t data_len)
@@ -98,34 +101,66 @@ static void crash_save_this_cpu(struct pt_regs *regs, int cpu)
 }
 
 #ifdef CONFIG_SMP
-static atomic_t waiting_for_crash_ipi;
+static atomic_t enter_on_soft_reset = ATOMIC_INIT(0);
 
 void crash_ipi_callback(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
 
-	if (cpu == crashing_cpu)
-		return;
-
 	if (!cpu_online(cpu))
 		return;
 
-	if (ppc_md.kexec_cpu_down)
-		ppc_md.kexec_cpu_down(1, 1);
-
 	local_irq_disable();
+	if (!cpu_isset(cpu, cpus_in_crash))
+		crash_save_this_cpu(regs, cpu);
+	cpu_set(cpu, cpus_in_crash);
 
-	crash_save_this_cpu(regs, cpu);
-	atomic_dec(&waiting_for_crash_ipi);
+	/*
+	 * Entered via soft-reset - this happens either because the
+	 * kdump process itself was invoked via soft-reset, or because
+	 * the user activated soft-reset when some CPU did not respond
+	 * to the crash IPI.  In that case a secondary CPU can enter
+	 * this function twice: once via the IPI and once via soft-reset.
+	 * Tell the kexec CPU that this CPU entered via soft-reset and
+	 * is ready to go down.
+	 */
+	if (cpu_isset(cpu, cpus_in_sr)) {
+		cpu_clear(cpu, cpus_in_sr);
+		atomic_inc(&enter_on_soft_reset);
+	}
+
+	/*
+	 * Hold here until the kdump boot starts, i.e. until the kexec
+	 * CPU has marked itself in cpus_in_crash.  If some CPUs are
+	 * still missing, soft-reset will be invoked to bring them in.
+	 */
+	while (!cpu_isset(crashing_cpu, cpus_in_crash))
+		cpu_relax();
+
+	if (ppc_md.kexec_cpu_down)
+		ppc_md.kexec_cpu_down(1, 1);
 	kexec_smp_wait();
 	/* NOTREACHED */
 }
 
-static void crash_kexec_prepare_cpus(void)
+/*
+ * Wait until all CPUs have entered via soft-reset.
+ */
+static void crash_soft_reset_check(int cpu)
+{
+	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+
+	cpu_clear(cpu, cpus_in_sr);
+	while (atomic_read(&enter_on_soft_reset) != ncpus)
+		cpu_relax();
+}
+
+
+static void crash_kexec_prepare_cpus(int cpu)
 {
 	unsigned int msecs;
 
-	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+	unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
 
 	crash_send_ipi(crash_ipi_callback);
 	smp_wmb();
@@ -133,14 +168,13 @@ static void crash_kexec_prepare_cpus(void)
 	/*
 	 * FIXME: Until we will have the way to stop other CPUSs reliabally,
 	 * the crash CPU will send an IPI and wait for other CPUs to
-	 * respond. If not, proceed the kexec boot even though we failed to
-	 * capture other CPU states.
+	 * respond.
 	 * Delay of at least 10 seconds.
 	 */
-	printk(KERN_ALERT "Sending IPI to other cpus...\n");
+	printk(KERN_EMERG "Sending IPI to other cpus...\n");
 	msecs = 10000;
-	while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) {
-		barrier();
+	while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) {
+		cpu_relax();
 		mdelay(1);
 	}
 
@@ -149,18 +183,71 @@ static void crash_kexec_prepare_cpus(void)
 	/*
 	 * FIXME: In case if we do not get all CPUs, one possibility: ask the
 	 * user to do soft reset such that we get all.
-	 * IPI handler is already set by the panic cpu initially. Therefore,
-	 * all cpus could invoke this handler from die() and the panic CPU
-	 * will call machine_kexec() directly from this handler to do
-	 * kexec boot.
+	 * Soft-reset will be used until a better mechanism is implemented.
+	 */
+	if (cpus_weight(cpus_in_crash) < ncpus) {
+		printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n",
+			ncpus - cpus_weight(cpus_in_crash));
+		printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n");
+		cpus_in_sr = CPU_MASK_NONE;
+		atomic_set(&enter_on_soft_reset, 0);
+		while (cpus_weight(cpus_in_crash) < ncpus)
+			cpu_relax();
+	}
+	/*
+	 * Make sure all CPUs have entered via soft-reset if kdump was
+	 * invoked using soft-reset.
 	 */
-	if (atomic_read(&waiting_for_crash_ipi))
-		printk(KERN_ALERT "done waiting: %d cpus not responding\n",
-			atomic_read(&waiting_for_crash_ipi));
+	if (cpu_isset(cpu, cpus_in_sr))
+		crash_soft_reset_check(cpu);
 	/* Leave the IPI callback set */
 }
+
+/*
+ * This function is called by secondary CPUs, and also by the kexec CPU
+ * if soft-reset is activated to stop some CPUs.
+ */
+void crash_kexec_secondary(struct pt_regs *regs)
+{
+	int cpu = smp_processor_id();
+	unsigned long flags;
+	int msecs = 5;
+
+	local_irq_save(flags);
+	/* Wait up to 5ms for the kexec CPU to enter. */
+	while (crashing_cpu < 0) {
+		if (--msecs < 0) {
+			/*
+			 * Either the kdump image is not loaded or the
+			 * kdump process was not started - probably xmon
+			 * was exited using 'x' (exit and recover) or
+			 * kexec_should_crash() failed for all running tasks.
+			 */
+			cpu_clear(cpu, cpus_in_sr);
+			local_irq_restore(flags);
+			return;
+		}
+		mdelay(1);
+		cpu_relax();
+	}
+	if (cpu == crashing_cpu) {
+		/*
+		 * The panic CPU enters this function only via soft-reset.
+		 * Wait until all secondary CPUs have entered, then start
+		 * the kexec boot.
+		 */
+		crash_soft_reset_check(cpu);
+		cpu_set(crashing_cpu, cpus_in_crash);
+		if (ppc_md.kexec_cpu_down)
+			ppc_md.kexec_cpu_down(1, 0);
+		machine_kexec(kexec_crash_image);
+		/* NOTREACHED */
+	}
+	crash_ipi_callback(regs);
+}
+
 #else
-static void crash_kexec_prepare_cpus(void)
+static void crash_kexec_prepare_cpus(int cpu)
 {
 	/*
 	 * move the secondarys to us so that we can copy
@@ -171,6 +258,10 @@ static void crash_kexec_prepare_cpus(void)
 	smp_release_cpus();
 }
 
+void crash_kexec_secondary(struct pt_regs *regs)
+{
+	cpus_in_sr = CPU_MASK_NONE;
+}
 #endif
 
 void default_machine_crash_shutdown(struct pt_regs *regs)
@@ -199,14 +290,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
 			desc->handler->disable(irq);
 	}
 
-	if (ppc_md.kexec_cpu_down)
-		ppc_md.kexec_cpu_down(1, 0);
-
 	/*
 	 * Make a note of crashing cpu. Will be used in machine_kexec
 	 * such that another IPI will not be sent.
 	 */
 	crashing_cpu = smp_processor_id();
-	crash_kexec_prepare_cpus();
 	crash_save_this_cpu(regs, crashing_cpu);
+	crash_kexec_prepare_cpus(crashing_cpu);
+	cpu_set(crashing_cpu, cpus_in_crash);
+	if (ppc_md.kexec_cpu_down)
+		ppc_md.kexec_cpu_down(1, 0);
 }
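
A note on the synchronization the patch introduces: each secondary CPU that takes the crash IPI saves its registers, marks itself in cpus_in_crash, and then spins until the crashing CPU marks itself in the same mask; the crashing CPU does that only after it has waited up to 10 seconds (or fallen back to soft-reset) for the others, so marking it is what releases the secondaries into kexec_smp_wait(). The code below is a minimal userspace sketch of that handshake, not kernel code: thread 0 stands in for the crashing CPU, the helper names (NCPUS, mark_in_crash, nr_in_crash, secondary) are invented for the example, and the soft-reset fallback path is left out. Build with something like: cc -pthread.

/*
 * Minimal userspace model (NOT kernel code) of the crash-CPU handshake.
 * Thread 0 plays the crashing/kexec CPU; the others play secondaries
 * that respond to the crash IPI.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NCPUS 4

static atomic_ulong cpus_in_crash;	/* models the cpus_in_crash cpumask */
static const int crashing_cpu = 0;	/* models the global crashing_cpu   */

static void mark_in_crash(int cpu)
{
	atomic_fetch_or(&cpus_in_crash, 1UL << cpu);
}

static int is_in_crash(int cpu)
{
	return (atomic_load(&cpus_in_crash) >> cpu) & 1;
}

static int nr_in_crash(void)		/* models cpus_weight(cpus_in_crash) */
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NCPUS; cpu++)
		n += is_in_crash(cpu);
	return n;
}

static void *secondary(void *arg)
{
	int cpu = (int)(long)arg;

	/* crash_ipi_callback(): save this CPU's state, then report in. */
	mark_in_crash(cpu);

	/* Hold until the crashing CPU signals that the kdump boot starts. */
	while (!is_in_crash(crashing_cpu))
		;	/* cpu_relax() in the kernel */

	printf("cpu %d going down\n", cpu);
	return NULL;	/* kexec_smp_wait() in the kernel */
}

int main(void)
{
	pthread_t tid[NCPUS];
	int cpu, msecs = 10000;

	/* "Send the crash IPI" to the secondaries. */
	for (cpu = 1; cpu < NCPUS; cpu++)
		pthread_create(&tid[cpu], NULL, secondary, (void *)(long)cpu);

	/* crash_kexec_prepare_cpus(): wait up to 10 seconds for them. */
	while (nr_in_crash() < NCPUS - 1 && --msecs > 0)
		usleep(1000);

	/* Marking the crashing CPU last is what releases the secondaries. */
	mark_in_crash(crashing_cpu);
	printf("cpu %d starting the capture kernel\n", crashing_cpu);

	for (cpu = 1; cpu < NCPUS; cpu++)
		pthread_join(tid[cpu], NULL);
	return 0;
}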