@@ -1505,25 +1505,31 @@ static int __perf_install_in_context(void *info)
 	struct perf_event_context *task_ctx = cpuctx->task_ctx;
 	struct task_struct *task = current;
 
-	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
+	perf_ctx_lock(cpuctx, task_ctx);
 	perf_pmu_disable(cpuctx->ctx.pmu);
 
 	/*
 	 * If there was an active task_ctx schedule it out.
 	 */
-	if (task_ctx) {
+	if (task_ctx)
 		task_ctx_sched_out(task_ctx);
-		/*
-		 * If the context we're installing events in is not the
-		 * active task_ctx, flip them.
-		 */
-		if (ctx->task && task_ctx != ctx) {
-			raw_spin_unlock(&cpuctx->ctx.lock);
-			raw_spin_lock(&ctx->lock);
-			cpuctx->task_ctx = task_ctx = ctx;
-		}
+
+	/*
+	 * If the context we're installing events in is not the
+	 * active task_ctx, flip them.
+	 */
+	if (ctx->task && task_ctx != ctx) {
+		if (task_ctx)
+			raw_spin_unlock(&task_ctx->lock);
+		raw_spin_lock(&ctx->lock);
+		task_ctx = ctx;
+	}
+
+	if (task_ctx) {
+		cpuctx->task_ctx = task_ctx;
 		task = task_ctx->task;
 	}
+
 	cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 
 	update_context_time(ctx);
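
For illustration, below is a minimal user-space sketch of the lock hand-over pattern the hunk introduces: the old task context's lock is released before the new context's lock is taken, and the new context is only published as active once its lock is held. This is not kernel code; ctx_t and flip_ctx() are hypothetical stand-ins, and pthread mutexes replace the kernel's raw spinlocks.

/*
 * Minimal sketch (assumptions: ctx_t, flip_ctx() are stand-ins for
 * perf_event_context handling; pthread mutexes model raw spinlocks).
 * Build with: cc -pthread flip.c
 */
#include <pthread.h>
#include <stdio.h>

typedef struct ctx {
	pthread_mutex_t lock;
	const char *name;
} ctx_t;

/*
 * Caller holds old->lock (if old is non-NULL).  Returns with
 * new_ctx->lock held, mirroring how the patched code releases
 * task_ctx->lock before taking ctx->lock and only then treats
 * ctx as the active task context.
 */
static ctx_t *flip_ctx(ctx_t *old, ctx_t *new_ctx)
{
	if (old == new_ctx)
		return old;			/* already active, nothing to flip */
	if (old)
		pthread_mutex_unlock(&old->lock); /* drop before taking: never hold both */
	pthread_mutex_lock(&new_ctx->lock);
	return new_ctx;				/* publish only with the new lock held */
}

int main(void)
{
	ctx_t a = { PTHREAD_MUTEX_INITIALIZER, "a" };
	ctx_t b = { PTHREAD_MUTEX_INITIALIZER, "b" };
	ctx_t *active;

	pthread_mutex_lock(&a.lock);
	active = &a;
	active = flip_ctx(active, &b);	/* a.lock dropped, b.lock taken */
	printf("active ctx: %s\n", active->name);
	pthread_mutex_unlock(&active->lock);
	return 0;
}

Dropping the old lock before acquiring the new one keeps a strict one-lock-at-a-time discipline for the flip, so no lock ordering between the two contexts has to be defined.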