@@ -1502,9 +1502,10 @@ out:
 	return ispipe;
 }
 
-static void zap_process(struct task_struct *start)
+static int zap_process(struct task_struct *start)
 {
 	struct task_struct *t;
+	int nr = 0;
 
 	start->signal->flags = SIGNAL_GROUP_EXIT;
 	start->signal->group_stop_count = 0;
@@ -1512,31 +1513,33 @@ static void zap_process(struct task_struct *start)
 	t = start;
 	do {
 		if (t != current && t->mm) {
-			t->mm->core_state->nr_threads++;
 			sigaddset(&t->pending.signal, SIGKILL);
 			signal_wake_up(t, 1);
+			nr++;
 		}
 	} while_each_thread(start, t);
+
+	return nr;
 }
 
 static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
-				int exit_code)
+			struct core_state *core_state, int exit_code)
 {
 	struct task_struct *g, *p;
 	unsigned long flags;
-	int err = -EAGAIN;
+	int nr = -EAGAIN;
 
 	spin_lock_irq(&tsk->sighand->siglock);
 	if (!signal_group_exit(tsk->signal)) {
+		mm->core_state = core_state;
 		tsk->signal->group_exit_code = exit_code;
-		zap_process(tsk);
-		err = 0;
+		nr = zap_process(tsk);
 	}
 	spin_unlock_irq(&tsk->sighand->siglock);
-	if (err)
-		return err;
+	if (unlikely(nr < 0))
+		return nr;
 
-	if (atomic_read(&mm->mm_users) == mm->core_state->nr_threads + 1)
+	if (atomic_read(&mm->mm_users) == nr + 1)
 		goto done;
 	/*
 	 * We should find and kill all tasks which use this mm, and we should
@@ -1579,7 +1582,7 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
 		if (p->mm) {
 			if (unlikely(p->mm == mm)) {
 				lock_task_sighand(p, &flags);
-				zap_process(p);
+				nr += zap_process(p);
 				unlock_task_sighand(p, &flags);
 			}
 			break;
@@ -1588,7 +1591,8 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
 	}
 	rcu_read_unlock();
 done:
-	return mm->core_state->nr_threads;
+	core_state->nr_threads = nr;
+	return nr;
 }
 
 static int coredump_wait(int exit_code)
@@ -1601,12 +1605,7 @@ static int coredump_wait(int exit_code)
 
 	init_completion(&mm->core_done);
 	init_completion(&core_state.startup);
-	core_state.nr_threads = 0;
-	mm->core_state = &core_state;
-
-	core_waiters = zap_threads(tsk, mm, exit_code);
-	if (core_waiters < 0)
-		mm->core_state = NULL;
+	core_waiters = zap_threads(tsk, mm, &core_state, exit_code);
 	up_write(&mm->mmap_sem);
 
 	if (unlikely(core_waiters < 0))