|
@@ -1577,6 +1577,17 @@ static inline int may_ptrace_stop(void)
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Return nonzero if there is a SIGKILL that should be waking us up.
|
|
|
+ * Called with the siglock held.
|
|
|
+ */
|
|
|
+static int sigkill_pending(struct task_struct *tsk)
|
|
|
+{
|
|
|
+ return ((sigismember(&tsk->pending.signal, SIGKILL) ||
|
|
|
+ sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
|
|
|
+ !unlikely(sigismember(&tsk->blocked, SIGKILL)));
|
|
|
+}
|
|
|
+
|
|
|
/*
|
|
|
* This must be called with current->sighand->siglock held.
|
|
|
*
|
|
@@ -1590,6 +1601,26 @@ static inline int may_ptrace_stop(void)
|
|
|
*/
|
|
|
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
|
|
|
{
|
|
|
+ int killed = 0;
|
|
|
+
|
|
|
+ if (arch_ptrace_stop_needed(exit_code, info)) {
|
|
|
+ /*
|
|
|
+ * The arch code has something special to do before a
|
|
|
+ * ptrace stop. This is allowed to block, e.g. for faults
|
|
|
+ * on user stack pages. We can't keep the siglock while
|
|
|
+ * calling arch_ptrace_stop, so we must release it now.
|
|
|
+ * To preserve proper semantics, we must do this before
|
|
|
+ * any signal bookkeeping like checking group_stop_count.
|
|
|
+ * Meanwhile, a SIGKILL could come in before we retake the
|
|
|
+ * siglock. That must prevent us from sleeping in TASK_TRACED.
|
|
|
+ * So after regaining the lock, we must check for SIGKILL.
|
|
|
+ */
|
|
|
+	spin_unlock_irq(&current->sighand->siglock);
|
|
|
+ arch_ptrace_stop(exit_code, info);
|
|
|
+	spin_lock_irq(&current->sighand->siglock);
|
|
|
+ killed = sigkill_pending(current);
|
|
|
+ }
|
|
|
+
|
|
|
/*
|
|
|
* If there is a group stop in progress,
|
|
|
* we must participate in the bookkeeping.
|
|
@@ -1605,7 +1636,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
|
|
|
	spin_unlock_irq(&current->sighand->siglock);
|
|
|
try_to_freeze();
|
|
|
read_lock(&tasklist_lock);
|
|
|
- if (may_ptrace_stop()) {
|
|
|
+ if (!unlikely(killed) && may_ptrace_stop()) {
|
|
|
do_notify_parent_cldstop(current, CLD_TRAPPED);
|
|
|
read_unlock(&tasklist_lock);
|
|
|
schedule();
|