|
@@ -705,7 +705,7 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
|
|
|
|
|
|
if (why) {
|
|
|
/*
|
|
|
- * The first thread which returns from finish_stop()
|
|
|
+ * The first thread which returns from do_signal_stop()
|
|
|
* will take ->siglock, notice SIGNAL_CLD_MASK, and
|
|
|
* notify its parent. See get_signal_to_deliver().
|
|
|
*/
|
|
@@ -1664,29 +1664,6 @@ void ptrace_notify(int exit_code)
|
|
|
 	spin_unlock_irq(&current->sighand->siglock);
|
|
|
}
|
|
|
|
|
|
-static void
|
|
|
-finish_stop(int stop_count)
|
|
|
-{
|
|
|
- /*
|
|
|
- * If there are no other threads in the group, or if there is
|
|
|
- * a group stop in progress and we are the last to stop,
|
|
|
- * report to the parent. When ptraced, every thread reports itself.
|
|
|
- */
|
|
|
- if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
|
|
|
- read_lock(&tasklist_lock);
|
|
|
- do_notify_parent_cldstop(current, CLD_STOPPED);
|
|
|
- read_unlock(&tasklist_lock);
|
|
|
- }
|
|
|
-
|
|
|
- do {
|
|
|
- schedule();
|
|
|
- } while (try_to_freeze());
|
|
|
- /*
|
|
|
- * Now we don't run again until continued.
|
|
|
- */
|
|
|
- current->exit_code = 0;
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* This performs the stopping for SIGSTOP and other stop signals.
|
|
|
* We have to stop all threads in the thread group.
|
|
@@ -1696,15 +1673,9 @@ finish_stop(int stop_count)
|
|
|
static int do_signal_stop(int signr)
|
|
|
{
|
|
|
struct signal_struct *sig = current->signal;
|
|
|
- int stop_count;
|
|
|
+ int notify;
|
|
|
|
|
|
- if (sig->group_stop_count > 0) {
|
|
|
- /*
|
|
|
- * There is a group stop in progress. We don't need to
|
|
|
- * start another one.
|
|
|
- */
|
|
|
- stop_count = --sig->group_stop_count;
|
|
|
- } else {
|
|
|
+ if (!sig->group_stop_count) {
|
|
|
struct task_struct *t;
|
|
|
|
|
|
if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
|
|
@@ -1716,7 +1687,7 @@ static int do_signal_stop(int signr)
|
|
|
*/
|
|
|
sig->group_exit_code = signr;
|
|
|
|
|
|
- stop_count = 0;
|
|
|
+ sig->group_stop_count = 1;
|
|
|
for (t = next_thread(current); t != current; t = next_thread(t))
|
|
|
/*
|
|
|
* Setting state to TASK_STOPPED for a group
|
|
@@ -1725,19 +1696,44 @@ static int do_signal_stop(int signr)
|
|
|
*/
|
|
|
if (!(t->flags & PF_EXITING) &&
|
|
|
!task_is_stopped_or_traced(t)) {
|
|
|
- stop_count++;
|
|
|
+ sig->group_stop_count++;
|
|
|
signal_wake_up(t, 0);
|
|
|
}
|
|
|
- sig->group_stop_count = stop_count;
|
|
|
}
|
|
|
+ /*
|
|
|
+ * If there are no other threads in the group, or if there is
|
|
|
+ * a group stop in progress and we are the last to stop, report
|
|
|
+ * to the parent. When ptraced, every thread reports itself.
|
|
|
+ */
|
|
|
+ notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
|
|
|
+ notify = tracehook_notify_jctl(notify, CLD_STOPPED);
|
|
|
+ /*
|
|
|
+ * tracehook_notify_jctl() can drop and reacquire siglock, so
|
|
|
+ * we keep ->group_stop_count != 0 before the call. If SIGCONT
|
|
|
+ * or SIGKILL comes in between ->group_stop_count == 0.
|
|
|
+ */
|
|
|
+ if (sig->group_stop_count) {
|
|
|
+ if (!--sig->group_stop_count)
|
|
|
+ sig->flags = SIGNAL_STOP_STOPPED;
|
|
|
+ current->exit_code = sig->group_exit_code;
|
|
|
+ __set_current_state(TASK_STOPPED);
|
|
|
+ }
|
|
|
+	spin_unlock_irq(&current->sighand->siglock);
|
|
|
|
|
|
- if (stop_count == 0)
|
|
|
- sig->flags = SIGNAL_STOP_STOPPED;
|
|
|
- current->exit_code = sig->group_exit_code;
|
|
|
- __set_current_state(TASK_STOPPED);
|
|
|
+ if (notify) {
|
|
|
+ read_lock(&tasklist_lock);
|
|
|
+ do_notify_parent_cldstop(current, notify);
|
|
|
+ read_unlock(&tasklist_lock);
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Now we don't run again until woken by SIGCONT or SIGKILL */
|
|
|
+ do {
|
|
|
+ schedule();
|
|
|
+ } while (try_to_freeze());
|
|
|
+
|
|
|
+ tracehook_finish_jctl();
|
|
|
+ current->exit_code = 0;
|
|
|
|
|
|
-	spin_unlock_irq(&current->sighand->siglock);
|
|
|
- finish_stop(stop_count);
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
@@ -1806,14 +1802,15 @@ relock:
|
|
|
int why = (signal->flags & SIGNAL_STOP_CONTINUED)
|
|
|
? CLD_CONTINUED : CLD_STOPPED;
|
|
|
signal->flags &= ~SIGNAL_CLD_MASK;
|
|
|
- spin_unlock_irq(&sighand->siglock);
|
|
|
|
|
|
- if (unlikely(!tracehook_notify_jctl(1, why)))
|
|
|
- goto relock;
|
|
|
+ why = tracehook_notify_jctl(why, CLD_CONTINUED);
|
|
|
+ spin_unlock_irq(&sighand->siglock);
|
|
|
|
|
|
- read_lock(&tasklist_lock);
|
|
|
- do_notify_parent_cldstop(current->group_leader, why);
|
|
|
- read_unlock(&tasklist_lock);
|
|
|
+ if (why) {
|
|
|
+ read_lock(&tasklist_lock);
|
|
|
+ do_notify_parent_cldstop(current->group_leader, why);
|
|
|
+ read_unlock(&tasklist_lock);
|
|
|
+ }
|
|
|
goto relock;
|
|
|
}
|
|
|
|
|
@@ -1978,14 +1975,14 @@ void exit_signals(struct task_struct *tsk)
|
|
|
if (unlikely(tsk->signal->group_stop_count) &&
|
|
|
!--tsk->signal->group_stop_count) {
|
|
|
tsk->signal->flags = SIGNAL_STOP_STOPPED;
|
|
|
- group_stop = 1;
|
|
|
+ group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
|
|
|
}
|
|
|
out:
|
|
|
spin_unlock_irq(&tsk->sighand->siglock);
|
|
|
|
|
|
- if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
|
|
|
+ if (unlikely(group_stop)) {
|
|
|
read_lock(&tasklist_lock);
|
|
|
- do_notify_parent_cldstop(tsk, CLD_STOPPED);
|
|
|
+ do_notify_parent_cldstop(tsk, group_stop);
|
|
|
read_unlock(&tasklist_lock);
|
|
|
}
|
|
|
}
|