@@ -3128,6 +3128,40 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
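+/* Undo the per-CPU counters raised by account_event_cpu() */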
+static void unaccount_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
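+		/* is system-wide event */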
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void unaccount_event(struct perf_event *event)
+{
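+	/* inherited events were never accounted separately; see account_event() */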
+	if (event->parent)
+		return;
+
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_dec(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_dec(&nr_comm_events);
+	if (event->attr.task)
+		atomic_dec(&nr_task_events);
+	if (is_cgroup_event(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (has_branch_stack(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+
+	unaccount_event_cpu(event, event->cpu);
+}
+
 static void __free_event(struct perf_event *event)
 {
 	if (!event->parent) {
@@ -3147,29 +3181,7 @@ static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
 
-	if (!event->parent) {
-		if (event->attach_state & PERF_ATTACH_TASK)
-			static_key_slow_dec_deferred(&perf_sched_events);
-		if (event->attr.mmap || event->attr.mmap_data)
-			atomic_dec(&nr_mmap_events);
-		if (event->attr.comm)
-			atomic_dec(&nr_comm_events);
-		if (event->attr.task)
-			atomic_dec(&nr_task_events);
-		if (is_cgroup_event(event)) {
-			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			static_key_slow_dec_deferred(&perf_sched_events);
-		}
-
-		if (has_branch_stack(event)) {
-			static_key_slow_dec_deferred(&perf_sched_events);
-			/* is system-wide event */
-			if (!(event->attach_state & PERF_ATTACH_TASK)) {
-				atomic_dec(&per_cpu(perf_branch_stack_events,
-						    event->cpu));
-			}
-		}
-	}
+	unaccount_event(event);
 
 	if (event->rb) {
 		struct ring_buffer *rb;
@@ -6451,8 +6463,24 @@ unlock:
 	return pmu;
 }
 
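+/* Per-CPU side of the event accounting; undone by unaccount_event_cpu() */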
+static void account_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
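+		/* is system-wide event */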
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+}
+
 static void account_event(struct perf_event *event)
 {
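+	/* inherited events do not get accounted separately */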
+	if (event->parent)
+		return;
+
 	if (event->attach_state & PERF_ATTACH_TASK)
 		static_key_slow_inc(&perf_sched_events.key);
 	if (event->attr.mmap || event->attr.mmap_data)
@@ -6461,17 +6489,12 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (has_branch_stack(event)) {
+	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
-
-	if (is_cgroup_event(event)) {
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+	if (is_cgroup_event(event))
 		static_key_slow_inc(&perf_sched_events.key);
-	}
+
+	account_event_cpu(event, event->cpu);
 }
 
 /*