|
@@ -129,6 +129,7 @@ struct task_struct *kgdb_usethread;
|
|
|
struct task_struct *kgdb_contthread;
|
|
|
|
|
|
int kgdb_single_step;
|
|
|
+pid_t kgdb_sstep_pid;
|
|
|
|
|
|
/* Our I/O buffers. */
|
|
|
static char remcom_in_buffer[BUFMAX];
|
|
@@ -1400,6 +1401,7 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
|
|
|
struct kgdb_state kgdb_var;
|
|
|
struct kgdb_state *ks = &kgdb_var;
|
|
|
unsigned long flags;
|
|
|
+ int sstep_tries = 100;
|
|
|
int error = 0;
|
|
|
int i, cpu;
|
|
|
|
|
@@ -1430,13 +1432,14 @@ acquirelock:
|
|
|
cpu_relax();
|
|
|
|
|
|
/*
|
|
|
- * Do not start the debugger connection on this CPU if the last
|
|
|
- * instance of the exception handler wanted to come into the
|
|
|
- * debugger on a different CPU via a single step
|
|
|
+ * For single stepping, try to only enter on the processor
|
|
|
+ * that was single stepping. To guard against a deadlock, the
|
|
|
+ * kernel will only try for the value of sstep_tries before
|
|
|
+ * giving up and continuing on.
|
|
|
*/
|
|
|
if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
|
|
|
- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
|
|
|
-
|
|
|
+ (kgdb_info[cpu].task &&
|
|
|
+ kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
|
|
|
atomic_set(&kgdb_active, -1);
|
|
|
touch_softlockup_watchdog();
|
|
|
clocksource_touch_watchdog();
|
|
@@ -1529,6 +1532,13 @@ acquirelock:
|
|
|
}
|
|
|
|
|
|
kgdb_restore:
|
|
|
+ if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
|
|
|
+ int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
|
|
|
+ if (kgdb_info[sstep_cpu].task)
|
|
|
+ kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
|
|
|
+ else
|
|
|
+ kgdb_sstep_pid = 0;
|
|
|
+ }
|
|
|
/* Free kgdb_active */
|
|
|
atomic_set(&kgdb_active, -1);
|
|
|
touch_softlockup_watchdog();
|