@@ -153,6 +153,9 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 {
 	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
+#ifdef CONFIG_PERF_COUNTERS
+	WARN_ON_ONCE(!list_empty(&tsk->perf_counter_ctx.counter_list));
+#endif
 	trace_sched_process_free(tsk);
 	put_task_struct(tsk);
 }

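For context, once this hunk is applied the RCU-deferred free path looks roughly like the sketch below (reassembled from the hunk's context and added lines, not quoted verbatim from kernel/exit.c; the extra comment is illustrative, not part of the patch). The new WARN_ON_ONCE() trips if any counter is still queued on the task's counter context by the time the task_struct is finally freed:

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

#ifdef CONFIG_PERF_COUNTERS
	/* every counter should have been detached before the final free */
	WARN_ON_ONCE(!list_empty(&tsk->perf_counter_ctx.counter_list));
#endif
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}
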
@@ -922,12 +925,6 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 	forget_original_parent(tsk);
 	exit_task_namespaces(tsk);
 
-	/*
-	 * Flush inherited counters to the parent - before the parent
-	 * gets woken up by child-exit notifications.
-	 */
-	perf_counter_exit_task(tsk);
-
 	write_lock_irq(&tasklist_lock);
 	if (group_dead)
 		kill_orphaned_pgrp(tsk->group_leader, NULL);

@@ -1122,12 +1119,6 @@ NORET_TYPE void do_exit(long code)
 	if (tsk->splice_pipe)
 		__free_pipe_info(tsk->splice_pipe);
 
-	/*
-	 * These must happen late, after the PID is not
-	 * hashed anymore, but still at a point that may sleep:
-	 */
-	perf_counter_exit_task(tsk);
-
 	preempt_disable();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;

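Taken together, the two hunks above drop both early call sites, so the exiting child no longer flushes its own counters from exit_notify() or do_exit(); the flush moves to the parent's wait path in the next hunk. After the removals the tail of do_exit() reduces to its context lines, roughly as follows (the inline note is added here for illustration only):

	if (tsk->splice_pipe)
		__free_pipe_info(tsk->splice_pipe);

	/* counters are now flushed by the parent in wait_task_zombie() */
	preempt_disable();
	/* causes final put_task_struct in finish_task_switch(). */
	tsk->state = TASK_DEAD;
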
@@ -1371,6 +1362,12 @@ static int wait_task_zombie(struct task_struct *p, int options,
 	 */
 	read_unlock(&tasklist_lock);
 
+	/*
+	 * Flush inherited counters to the parent - before the parent
+	 * gets woken up by child-exit notifications.
+	 */
+	perf_counter_exit_task(p);
+
 	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
 	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
 		? p->signal->group_exit_code : p->exit_code;

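With the final hunk applied, the counters are flushed from the parent's side instead: wait_task_zombie() calls perf_counter_exit_task() on the reaped child after tasklist_lock has been dropped and before the child's exit status and resource usage are collected. Reassembled from the hunk, the relevant stretch reads roughly:

	read_unlock(&tasklist_lock);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 */
	perf_counter_exit_task(p);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
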