@@ -141,6 +141,7 @@ enum event_type_t {
 struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
+static DEFINE_PER_CPU(atomic_t, perf_freq_events);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -3139,6 +3140,9 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
 	}
 	if (is_cgroup_event(event))
 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+
+	if (event->attr.freq)
+		atomic_dec(&per_cpu(perf_freq_events, cpu));
 }
 
 static void unaccount_event(struct perf_event *event)
@@ -6474,6 +6478,9 @@ static void account_event_cpu(struct perf_event *event, int cpu)
 	}
 	if (is_cgroup_event(event))
 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+
+	if (event->attr.freq)
+		atomic_inc(&per_cpu(perf_freq_events, cpu));
 }
 
 static void account_event(struct perf_event *event)
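
For context (not part of the patch above): one plausible reason for keeping a per-CPU count of frequency-based events is so that a per-CPU fast path can cheaply ask whether any attr.freq events are currently accounted on the local CPU. A minimal sketch of such a check follows, assuming the perf_freq_events counter introduced above; the helper name is hypothetical and the actual call sites are not shown in these hunks.

/*
 * Hypothetical helper, not part of the patch: returns true if the local
 * CPU currently has at least one event using frequency-based sampling
 * (attr.freq).  Expected to be called with preemption disabled, e.g.
 * from a tick-related path, so the per-CPU reference stays stable.
 */
static bool cpu_has_freq_events(void)
{
	return atomic_read(this_cpu_ptr(&perf_freq_events)) != 0;
}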