@@ -1371,13 +1371,7 @@ static void format_corename(char *corename, const char *pattern, long signr)
 
 static void zap_process(struct task_struct *start)
 {
 	struct task_struct *t;
-	unsigned long flags;
 
-	/*
-	 * start->sighand can't disappear, but may be
-	 * changed by de_thread()
-	 */
-	lock_task_sighand(start, &flags);
 	start->signal->flags = SIGNAL_GROUP_EXIT;
 	start->signal->group_stop_count = 0;
@@ -1389,40 +1383,51 @@ static void zap_process(struct task_struct *start)
 			signal_wake_up(t, 1);
 		}
 	} while ((t = next_thread(t)) != start);
-
-	unlock_task_sighand(start, &flags);
 }
 
 static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
 				int exit_code)
 {
 	struct task_struct *g, *p;
+	unsigned long flags;
 	int err = -EAGAIN;
 
 	spin_lock_irq(&tsk->sighand->siglock);
 	if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
-		tsk->signal->flags = SIGNAL_GROUP_EXIT;
 		tsk->signal->group_exit_code = exit_code;
-		tsk->signal->group_stop_count = 0;
+		zap_process(tsk);
 		err = 0;
 	}
 	spin_unlock_irq(&tsk->sighand->siglock);
 	if (err)
 		return err;
 
+	if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+		goto done;
+
 	rcu_read_lock();
 	for_each_process(g) {
+		if (g == tsk->group_leader)
+			continue;
+
 		p = g;
 		do {
 			if (p->mm) {
-				if (p->mm == mm)
+				if (p->mm == mm) {
+					/*
+					 * p->sighand can't disappear, but
+					 * may be changed by de_thread()
+					 */
+					lock_task_sighand(p, &flags);
 					zap_process(p);
+					unlock_task_sighand(p, &flags);
+				}
 				break;
 			}
 		} while ((p = next_thread(p)) != g);
 	}
 	rcu_read_unlock();
-
+done:
 	return mm->core_waiters;
 }
 