@@ -4474,7 +4474,7 @@ static void perf_event_task_ctx(struct perf_event_context *ctx,
 static void perf_event_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
+	struct perf_event_context *ctx, *task_ctx = task_event->task_ctx;
 	struct pmu *pmu;
 	int ctxn;
 
@@ -4485,20 +4485,22 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 			goto next;
 		perf_event_task_ctx(&cpuctx->ctx, task_event);
 
-		ctx = task_event->task_ctx;
-		if (!ctx) {
-			ctxn = pmu->task_ctx_nr;
-			if (ctxn < 0)
-				goto next;
-			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
-			if (ctx)
-				perf_event_task_ctx(ctx, task_event);
-		}
+		if (task_ctx)
+			goto next;
+		ctxn = pmu->task_ctx_nr;
+		if (ctxn < 0)
+			goto next;
+		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+		if (ctx)
+			perf_event_task_ctx(ctx, task_event);
 next:
 		put_cpu_ptr(pmu->pmu_cpu_context);
 	}
-	if (task_event->task_ctx)
-		perf_event_task_ctx(task_event->task_ctx, task_event);
+	if (task_ctx) {
+		preempt_disable();
+		perf_event_task_ctx(task_ctx, task_event);
+		preempt_enable();
+	}
 
 	rcu_read_unlock();
 }