@@ -141,11 +141,11 @@ enum event_type_t {
 struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
-static DEFINE_PER_CPU(atomic_t, perf_freq_events);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
+static atomic_t nr_freq_events __read_mostly;
 
 static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
@@ -1871,9 +1871,6 @@ static int __perf_install_in_context(void *info)
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
 
-	if (atomic_read(&__get_cpu_var(perf_freq_events)))
-		tick_nohz_full_kick();
-
 	return 0;
 }
 
@@ -2811,7 +2808,7 @@ done:
 #ifdef CONFIG_NO_HZ_FULL
 bool perf_event_can_stop_tick(void)
 {
-	if (atomic_read(&__get_cpu_var(perf_freq_events)) ||
+	if (atomic_read(&nr_freq_events) ||
 	    __this_cpu_read(perf_throttled_count))
 		return false;
 	else
@@ -3140,9 +3137,6 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
 	}
 	if (is_cgroup_event(event))
 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
-
-	if (event->attr.freq)
-		atomic_dec(&per_cpu(perf_freq_events, cpu));
 }
 
 static void unaccount_event(struct perf_event *event)
@@ -3158,6 +3152,8 @@ static void unaccount_event(struct perf_event *event)
 		atomic_dec(&nr_comm_events);
 	if (event->attr.task)
 		atomic_dec(&nr_task_events);
+	if (event->attr.freq)
+		atomic_dec(&nr_freq_events);
 	if (is_cgroup_event(event))
 		static_key_slow_dec_deferred(&perf_sched_events);
 	if (has_branch_stack(event))
@@ -6489,9 +6485,6 @@ static void account_event_cpu(struct perf_event *event, int cpu)
 	}
 	if (is_cgroup_event(event))
 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
-
-	if (event->attr.freq)
-		atomic_inc(&per_cpu(perf_freq_events, cpu));
 }
 
 static void account_event(struct perf_event *event)
@@ -6507,6 +6500,10 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
+	if (event->attr.freq) {
+		if (atomic_inc_return(&nr_freq_events) == 1)
+			tick_nohz_full_kick_all();
+	}
 	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
 	if (is_cgroup_event(event))
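For readers who want to poke at the accounting pattern outside the kernel tree, the sketch below is a minimal userspace model of it, not kernel code: C11 atomics stand in for atomic_t, kick_all_cpus() is a hypothetical stand-in for tick_nohz_full_kick_all(), and the per-CPU perf_throttled_count check is omitted. Note that atomic_fetch_add() returns the old value, so the == 0 test matches the patch's atomic_inc_return(&nr_freq_events) == 1 transition.

/* Userspace model of the global freq-event accounting above. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int nr_freq_events;	/* one global counter, not per-CPU */

/* Stand-in for tick_nohz_full_kick_all(): make every nohz_full CPU
 * re-evaluate whether it may stop its tick. */
static void kick_all_cpus(void)
{
	puts("kick: all CPUs re-evaluate their tick");
}

static void account_freq_event(void)
{
	/* Only the 0 -> 1 transition needs the kick; with a global
	 * counter we cannot tell which CPU the event lands on, so we
	 * kick them all, but only once. */
	if (atomic_fetch_add(&nr_freq_events, 1) == 0)
		kick_all_cpus();
}

static void unaccount_freq_event(void)
{
	atomic_fetch_sub(&nr_freq_events, 1);
}

/* Model of perf_event_can_stop_tick(): any freq event anywhere
 * keeps every tick running. */
static bool can_stop_tick(void)
{
	return atomic_load(&nr_freq_events) == 0;
}

int main(void)
{
	printf("can stop tick: %d\n", can_stop_tick());	/* 1 */
	account_freq_event();				/* kicks once */
	account_freq_event();				/* no kick */
	printf("can stop tick: %d\n", can_stop_tick());	/* 0 */
	unaccount_freq_event();
	unaccount_freq_event();
	printf("can stop tick: %d\n", can_stop_tick());	/* 1 */
	return 0;
}

The trade-off modeled here is the same one the patch makes: a single freq event on any CPU now keeps the tick alive on all CPUs, in exchange for dropping the per-CPU bookkeeping and the kick in __perf_install_in_context().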