@@ -2201,13 +2201,6 @@ find_lively_task_by_vpid(pid_t vpid)
 	if (!task)
 		return ERR_PTR(-ESRCH);
 
-	/*
-	 * Can't attach events to a dying task.
-	 */
-	err = -ESRCH;
-	if (task->flags & PF_EXITING)
-		goto errout;
-
 	/* Reuse ptrace permission checks for now. */
 	err = -EACCES;
 	if (!ptrace_may_access(task, PTRACE_MODE_READ))
@@ -2268,14 +2261,27 @@ retry:
 
 		get_ctx(ctx);
 
-		if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
-			/*
-			 * We raced with some other task; use
-			 * the context they set.
-			 */
+		err = 0;
+		mutex_lock(&task->perf_event_mutex);
+		/*
+		 * If it has already passed perf_event_exit_task(),
+		 * we must see PF_EXITING; it takes this mutex too.
+		 */
+		if (task->flags & PF_EXITING)
+			err = -ESRCH;
+		else if (task->perf_event_ctxp[ctxn])
+			err = -EAGAIN;
+		else
+			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
+		mutex_unlock(&task->perf_event_mutex);
+
+		if (unlikely(err)) {
 			put_task_struct(task);
 			kfree(ctx);
-			goto retry;
+
+			if (err == -EAGAIN)
+				goto retry;
+			goto errout;
 		}
 	}
 
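The hunk above serializes context publication against task exit on task->perf_event_mutex: the attach side only installs a context after checking PF_EXITING under the mutex, and perf_event_exit_task() takes the same mutex, so an attacher that gets the lock after the exit path must observe PF_EXITING and bail out with -ESRCH. The following is a minimal userspace model of that handshake, not kernel code; fake_task, attach_context() and exit_task() are invented names, a plain bool stands in for PF_EXITING, and a pthread mutex stands in for perf_event_mutex.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_task {
	pthread_mutex_t perf_event_mutex;
	bool exiting;	/* stands in for PF_EXITING */
	void *ctx;	/* stands in for perf_event_ctxp[ctxn] */
};

/* Attach side: mirrors the decision logic added to find_get_context(). */
static int attach_context(struct fake_task *task, void *new_ctx)
{
	int err = 0;

	pthread_mutex_lock(&task->perf_event_mutex);
	if (task->exiting)
		err = -1;		/* models -ESRCH: task is going away */
	else if (task->ctx)
		err = -2;		/* models -EAGAIN: lost a race with another attach */
	else
		task->ctx = new_ctx;	/* publish the context under the mutex */
	pthread_mutex_unlock(&task->perf_event_mutex);

	return err;
}

/* Exit side: mirrors perf_event_exit_task(); the exiting flag is set first. */
static void exit_task(struct fake_task *task)
{
	task->exiting = true;	/* in the kernel, exit_signals() sets PF_EXITING earlier */

	pthread_mutex_lock(&task->perf_event_mutex);
	/* Any context published before we took the mutex is visible here. */
	if (task->ctx) {
		printf("exit path tears down the published context\n");
		task->ctx = NULL;
	}
	pthread_mutex_unlock(&task->perf_event_mutex);
}

int main(void)
{
	struct fake_task task = { .perf_event_mutex = PTHREAD_MUTEX_INITIALIZER };
	int dummy_ctx;

	exit_task(&task);				/* task exits first...	*/
	if (attach_context(&task, &dummy_ctx) != 0)	/* ...so attach must refuse */
		printf("attach refused: task is already exiting\n");
	return 0;
}
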
@@ -6127,7 +6133,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * scheduled, so we are now safe from rescheduling changing
 	 * our context.
 	 */
-	child_ctx = child->perf_event_ctxp[ctxn];
+	child_ctx = rcu_dereference(child->perf_event_ctxp[ctxn]);
 	task_ctx_sched_out(child_ctx, EVENT_ALL);
 
 	/*
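The reader-side change pairs with the new publication: since find_get_context() now installs the context with rcu_assign_pointer(), perf_event_exit_task_context() fetches it with rcu_dereference(), so the context's initialization is guaranteed to be visible before the pointer itself. The fragment below is a hedged stand-in using C11 atomics rather than the kernel's RCU macros (rcu_assign_pointer() is roughly a release store, rcu_dereference() roughly a dependency-ordered load); fake_ctx, ctx_slot, publish_ctx() and read_ctx() are invented for the sketch.

#include <stdatomic.h>
#include <stdio.h>

struct fake_ctx {
	int nr_events;
};

static struct fake_ctx the_ctx;
static _Atomic(struct fake_ctx *) ctx_slot;	/* stands in for perf_event_ctxp[ctxn] */

/* Writer: fully initialize the object, then publish the pointer. */
static void publish_ctx(void)
{
	the_ctx.nr_events = 1;
	/* Like rcu_assign_pointer(): fields written above are ordered before the pointer store. */
	atomic_store_explicit(&ctx_slot, &the_ctx, memory_order_release);
}

/* Reader: load the pointer with matching ordering before dereferencing it. */
static void read_ctx(void)
{
	/* Like rcu_dereference(): a plain load would let the compiler/CPU
	 * hoist the field access ahead of the pointer load. */
	struct fake_ctx *ctx = atomic_load_explicit(&ctx_slot, memory_order_consume);

	if (ctx)
		printf("reader sees nr_events = %d\n", ctx->nr_events);
}

int main(void)
{
	publish_ctx();
	read_ctx();
	return 0;
}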