@@ -575,8 +575,12 @@ return_normal:
 		raw_spin_lock(&dbg_slave_lock);
 
 #ifdef CONFIG_SMP
+	/* If send_ready set, slaves are already waiting */
+	if (ks->send_ready)
+		atomic_set(ks->send_ready, 1);
+
 	/* Signal the other CPUs to enter kgdb_wait() */
-	if ((!kgdb_single_step) && kgdb_do_roundup)
+	else if ((!kgdb_single_step) && kgdb_do_roundup)
 		kgdb_roundup_cpus(flags);
 #endif
 
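With an external NMI call-in, the slave CPUs are already spinning inside the NMI handler, so there is no need for a roundup IPI: the master simply sets the caller-supplied send_ready flag to release them (a caller sketch follows the last hunk). On the normal exception path ks->send_ready is NULL, so the else branch still rounds up the other CPUs exactly as before.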
@@ -678,11 +682,11 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
 	if (arch_kgdb_ops.enable_nmi)
 		arch_kgdb_ops.enable_nmi(0);
 
+	memset(ks, 0, sizeof(struct kgdb_state));
 	ks->cpu			= raw_smp_processor_id();
 	ks->ex_vector		= evector;
 	ks->signo		= signo;
 	ks->err_code		= ecode;
-	ks->kgdb_usethreadid	= 0;
 	ks->linux_regs		= regs;
 
 	if (kgdb_reenter_check(ks))
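The memset() replaces field-by-field initialization of the on-stack kgdb_state (which is why the explicit ks->kgdb_usethreadid = 0 can be dropped), and it guarantees that send_ready starts out NULL on this path, keeping the roundup branch in the first hunk live for ordinary exceptions.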
@@ -732,6 +736,30 @@ int kgdb_nmicallback(int cpu, void *regs)
 	return 1;
 }
 
+int kgdb_nmicallin(int cpu, int trapnr, void *regs, atomic_t *send_ready)
+{
+#ifdef CONFIG_SMP
+	if (!kgdb_io_ready(0) || !send_ready)
+		return 1;
+
+	if (kgdb_info[cpu].enter_kgdb == 0) {
+		struct kgdb_state kgdb_var;
+		struct kgdb_state *ks = &kgdb_var;
+
+		memset(ks, 0, sizeof(struct kgdb_state));
+		ks->cpu			= cpu;
+		ks->ex_vector		= trapnr;
+		ks->signo		= SIGTRAP;
+		ks->err_code		= KGDB_KDB_REASON_SYSTEM_NMI;
+		ks->linux_regs		= regs;
+		ks->send_ready		= send_ready;
+		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
+		return 0;
+	}
+#endif
+	return 1;
+}
+
 static void kgdb_console_write(struct console *co, const char *s,
 			       unsigned count)
 {
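Taken together, the hunks define a simple handshake for external NMI handlers: one CPU enters the debugger as master through kgdb_nmicallin(), and the debug core releases the remaining CPUs by setting the caller's send_ready atomic once its locks are held. A minimal sketch of such a caller follows; the example_* names and the NMI trap number are illustrative assumptions, not part of the patch.

	#include <linux/atomic.h>
	#include <linux/kgdb.h>

	static atomic_t example_send_ready;

	/* Runs on the one CPU elected master by the external NMI handler. */
	static void example_nmi_master(int cpu, void *regs)
	{
		atomic_set(&example_send_ready, 0);
		/* Returns 0 on success; slaves are released via send_ready */
		kgdb_nmicallin(cpu, 2 /* assumed NMI vector */, regs,
			       &example_send_ready);
	}

	/* Runs on every other CPU caught by the same NMI. */
	static void example_nmi_slave(int cpu, void *regs)
	{
		/* Wait until the master holds the debug core locks */
		while (atomic_read(&example_send_ready) == 0)
			cpu_relax();
		kgdb_nmicallback(cpu, regs);
	}

Because the master calls in with DCPU_WANT_MASTER before the slaves are released, the slaves always find an active session when kgdb_nmicallback() runs, and all CPUs resume together when the debugger exits.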