@@ -1381,6 +1381,9 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
+		if (event->cpu != -1 && event->cpu != smp_processor_id())
+			continue;
+
 		hwc = &event->hw;
 
 		interrupts = hwc->interrupts;
@@ -3265,6 +3268,9 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.comm || event->attr.mmap || event->attr.task)
 		return 1;
 
@@ -3290,12 +3296,11 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
-	put_cpu_var(perf_cpu_context);
-
 	if (!ctx)
 		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
 	if (ctx)
 		perf_event_task_ctx(ctx, task_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 }
 
@@ -3372,6 +3377,9 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.comm)
 		return 1;
 
@@ -3408,15 +3416,10 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_comm_ctx(&cpuctx->ctx, comm_event);
-	put_cpu_var(perf_cpu_context);
-
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_comm_ctx(ctx, comm_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 }
 
@@ -3491,6 +3494,9 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (event->attr.mmap)
 		return 1;
 
@@ -3564,15 +3570,10 @@ got_name:
 	rcu_read_lock();
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_mmap_ctx(&cpuctx->ctx, mmap_event);
-	put_cpu_var(perf_cpu_context);
-
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
 	ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_mmap_ctx(ctx, mmap_event);
+	put_cpu_var(perf_cpu_context);
 	rcu_read_unlock();
 
 	kfree(buf);
@@ -3863,6 +3864,9 @@ static int perf_swevent_match(struct perf_event *event,
 				struct perf_sample_data *data,
 				struct pt_regs *regs)
 {
+	if (event->cpu != -1 && event->cpu != smp_processor_id())
+		return 0;
+
 	if (!perf_swevent_is_counting(event))
 		return 0;
 