|
@@ -1517,7 +1517,7 @@ static void zap_process(struct task_struct *start)
|
|
sigaddset(&t->pending.signal, SIGKILL);
|
|
sigaddset(&t->pending.signal, SIGKILL);
|
|
signal_wake_up(t, 1);
|
|
signal_wake_up(t, 1);
|
|
}
|
|
}
|
|
- } while ((t = next_thread(t)) != start);
|
|
|
|
|
|
+ } while_each_thread(start, t);
|
|
}
|
|
}
|
|
|
|
|
|
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
|
|
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
|
|
@@ -1539,7 +1539,36 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
|
|
|
|
|
|
if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
|
|
if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
|
|
goto done;
|
|
goto done;
|
|
-
|
|
|
|
|
|
+ /*
|
|
|
|
+ * We should find and kill all tasks which use this mm, and we should
|
|
|
|
+ * count them correctly into mm->core_waiters. We don't take tasklist
|
|
|
|
+ * lock, but this is safe wrt:
|
|
|
|
+ *
|
|
|
|
+ * fork:
|
|
|
|
+ * None of sub-threads can fork after zap_process(leader). All
|
|
|
|
+ * processes which were created before this point should be
|
|
|
|
+ * visible to zap_threads() because copy_process() adds the new
|
|
|
|
+ * process to the tail of init_task.tasks list, and lock/unlock
|
|
|
|
+ * of ->siglock provides a memory barrier.
|
|
|
|
+ *
|
|
|
|
+ * do_exit:
|
|
|
|
+ * The caller holds mm->mmap_sem. This means that the task which
|
|
|
|
+ * uses this mm can't pass exit_mm(), so it can't exit or clear
|
|
|
|
+ * its ->mm.
|
|
|
|
+ *
|
|
|
|
+ * de_thread:
|
|
|
|
+ * It does list_replace_rcu(&leader->tasks, &current->tasks),
|
|
|
|
+ * we must see either old or new leader, this does not matter.
|
|
|
|
+ * However, it can change p->sighand, so lock_task_sighand(p)
|
|
|
|
+ * must be used. Since p->mm != NULL and we hold ->mmap_sem
|
|
|
|
+ * it can't fail.
|
|
|
|
+ *
|
|
|
|
+ * Note also that "g" can be the old leader with ->mm == NULL
|
|
|
|
+ * and already unhashed and thus removed from ->thread_group.
|
|
|
|
+ * This is OK, __unhash_process()->list_del_rcu() does not
|
|
|
|
+ * clear the ->next pointer, we will find the new leader via
|
|
|
|
+ * next_thread().
|
|
|
|
+ */
|
|
rcu_read_lock();
|
|
rcu_read_lock();
|
|
for_each_process(g) {
|
|
for_each_process(g) {
|
|
if (g == tsk->group_leader)
|
|
if (g == tsk->group_leader)
|
|
@@ -1549,17 +1578,13 @@ static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
|
|
do {
|
|
do {
|
|
if (p->mm) {
|
|
if (p->mm) {
|
|
if (p->mm == mm) {
|
|
if (p->mm == mm) {
|
|
- /*
|
|
|
|
- * p->sighand can't disappear, but
|
|
|
|
- * may be changed by de_thread()
|
|
|
|
- */
|
|
|
|
lock_task_sighand(p, &flags);
|
|
lock_task_sighand(p, &flags);
|
|
zap_process(p);
|
|
zap_process(p);
|
|
unlock_task_sighand(p, &flags);
|
|
unlock_task_sighand(p, &flags);
|
|
}
|
|
}
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
- } while ((p = next_thread(p)) != g);
|
|
|
|
|
|
+ } while_each_thread(g, p);
|
|
}
|
|
}
|
|
rcu_read_unlock();
|
|
rcu_read_unlock();
|
|
done:
|
|
done:
|