@@ -4419,7 +4419,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_task_ctx(&cpuctx->ctx, task_event);
 
@@ -4565,7 +4565,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
@@ -4761,7 +4761,7 @@ got_name:
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->active_pmu != pmu)
+		if (cpuctx->unique_pmu != pmu)
 			goto next;
 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
 					vma->vm_flags & VM_EXEC);
@@ -5862,8 +5862,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
 
 		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
 
-		if (cpuctx->active_pmu == old_pmu)
-			cpuctx->active_pmu = pmu;
+		if (cpuctx->unique_pmu == old_pmu)
+			cpuctx->unique_pmu = pmu;
 	}
 }
 
@@ -5998,7 +5998,7 @@ skip_type:
 	cpuctx->ctx.pmu = pmu;
 	cpuctx->jiffies_interval = 1;
 	INIT_LIST_HEAD(&cpuctx->rotation_list);
-	cpuctx->active_pmu = pmu;
+	cpuctx->unique_pmu = pmu;
 }
 
 got_cpu_context:
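
The hunks above rename the field at every use site in kernel/events/core.c; the declaration itself lives in struct perf_cpu_context. The new name suggests the field marks the one pmu a shared cpu context belongs to, rather than a pmu that is currently active. As a hedged sketch of the companion header change (the surrounding member layout in include/linux/perf_event.h is assumed from this era of the tree; only rotation_list, jiffies_interval, and the pmu pointer are confirmed by the hunks above):

/*
 * Sketch of the renamed member, assuming the contemporary layout of
 * struct perf_cpu_context in include/linux/perf_event.h.
 *
 * Several software pmus can share one perf_cpu_context; unique_pmu
 * records the single pmu that owns the shared context, so the
 * list_for_each_entry_rcu() walks over &pmus above skip any pmu that
 * merely shares the context and process each context exactly once.
 */
struct perf_cpu_context {
	struct perf_event_context	ctx;
	struct perf_event_context	*task_ctx;
	int				active_oncpu;
	int				exclusive;
	struct list_head		rotation_list;
	int				jiffies_interval;
	struct pmu			*unique_pmu;	/* was: active_pmu */
};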