@@ -4005,9 +4005,6 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
 	perf_swevent_overflow(event, 0, nmi, data, regs);
 }
 
-static int perf_tp_event_match(struct perf_event *event,
-				struct perf_sample_data *data);
-
 static int perf_exclude_event(struct perf_event *event,
 			      struct pt_regs *regs)
 {
@@ -4037,10 +4034,6 @@ static int perf_swevent_match(struct perf_event *event,
 	if (perf_exclude_event(event, regs))
 		return 0;
 
-	if (event->attr.type == PERF_TYPE_TRACEPOINT &&
-	    !perf_tp_event_match(event, data))
-		return 0;
-
 	return 1;
 }
 
@@ -4122,7 +4115,7 @@ end:
 
 int perf_swevent_get_recursion_context(void)
 {
-	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	int rctx;
 
 	if (in_nmi())
@@ -4134,10 +4127,8 @@ int perf_swevent_get_recursion_context(void)
 	else
 		rctx = 0;
 
-	if (cpuctx->recursion[rctx]) {
-		put_cpu_var(perf_cpu_context);
+	if (cpuctx->recursion[rctx])
 		return -1;
-	}
 
 	cpuctx->recursion[rctx]++;
 	barrier();
@@ -4151,7 +4142,6 @@ void perf_swevent_put_recursion_context(int rctx)
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	barrier();
 	cpuctx->recursion[rctx]--;
-	put_cpu_var(perf_cpu_context);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
@@ -4162,6 +4152,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 	struct perf_sample_data data;
 	int rctx;
 
+	preempt_disable_notrace();
 	rctx = perf_swevent_get_recursion_context();
 	if (rctx < 0)
-		return;
+		goto out;
@@ -4171,6 +4162,8 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
 
 	perf_swevent_put_recursion_context(rctx);
+out:
+	preempt_enable_notrace();
 }
 
 static void perf_swevent_read(struct perf_event *event)
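
With get_cpu_var() replaced by __get_cpu_var() above, the recursion-context
helpers no longer pin the CPU themselves; every caller is now responsible for
holding off preemption across the whole get/put window, which is what the
preempt_disable_notrace()/preempt_enable_notrace() pair added to
__perf_sw_event() does (including on the early-out path, hence the goto).
A minimal sketch of the calling convention this assumes; the function name is
hypothetical and the actual event emission is elided:

	/* Sketch only: preemption must stay off for the whole window,
	 * since __get_cpu_var() no longer disables it for us. */
	static void example_emit_sw_event(struct pt_regs *regs)
	{
		int rctx;

		preempt_disable_notrace();
		rctx = perf_swevent_get_recursion_context();
		if (rctx < 0)
			goto out;	/* already inside a swevent on this CPU */

		/* ... perf_sample_data_init() and do_perf_sw_event() here ... */

		perf_swevent_put_recursion_context(rctx);
	out:
		preempt_enable_notrace();
	}
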
@@ -4486,11 +4479,43 @@ static int swevent_hlist_get(struct perf_event *event)
 
 #ifdef CONFIG_EVENT_TRACING
 
-void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
-		   int entry_size, struct pt_regs *regs, void *event)
+static const struct pmu perf_ops_tracepoint = {
+	.enable		= perf_trace_enable,
+	.disable	= perf_trace_disable,
+	.read		= perf_swevent_read,
+	.unthrottle	= perf_swevent_unthrottle,
+};
+
+static int perf_tp_filter_match(struct perf_event *event,
+				struct perf_sample_data *data)
+{
+	void *record = data->raw->data;
+
+	if (likely(!event->filter) || filter_match_preds(event->filter, record))
+		return 1;
+	return 0;
+}
+
+static int perf_tp_event_match(struct perf_event *event,
+				struct perf_sample_data *data,
+				struct pt_regs *regs)
+{
+	if (perf_exclude_event(event, regs))
+		return 0;
+
+	if (!perf_tp_filter_match(event, data))
+		return 0;
+
+	return 1;
+}
+
+void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
+		   struct pt_regs *regs, struct hlist_head *head)
 {
-	const int type = PERF_TYPE_TRACEPOINT;
 	struct perf_sample_data data;
+	struct perf_event *event;
+	struct hlist_node *node;
+
 	struct perf_raw_record raw = {
 		.size = entry_size,
 		.data = record,
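
The filter consulted by perf_tp_filter_match() is the one userspace attaches
through the existing PERF_EVENT_IOC_SET_FILTER ioctl (handled by
perf_event_set_filter() further down); nothing in this patch changes that
interface. A small userspace sketch of how such a filter reaches
event->filter:

	#include <sys/ioctl.h>
	#include <linux/perf_event.h>

	/* Attach a tracepoint filter to an open perf event fd.  The kernel
	 * parses the string into event->filter, which perf_tp_filter_match()
	 * then evaluates per record via filter_match_preds(). */
	static int set_tp_filter(int event_fd, const char *filter)
	{
		return ioctl(event_fd, PERF_EVENT_IOC_SET_FILTER, filter);
	}

For a sched_switch event, for example, set_tp_filter(fd, "prev_prio < 100")
(an illustrative filter string) would keep only records whose prev_prio field
is below 100.
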
@@ -4499,30 +4524,18 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	perf_sample_data_init(&data, addr);
 	data.raw = &raw;
 
-	if (!event) {
-		do_perf_sw_event(type, event_id, count, 1, &data, regs);
-		return;
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
+		if (perf_tp_event_match(event, &data, regs))
+			perf_swevent_add(event, count, 1, &data, regs);
 	}
-
-	if (perf_swevent_match(event, type, event_id, &data, regs))
-		perf_swevent_add(event, count, 1, &data, regs);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
 
-static int perf_tp_event_match(struct perf_event *event,
-				struct perf_sample_data *data)
-{
-	void *record = data->raw->data;
-
-	if (likely(!event->filter) || filter_match_preds(event->filter, record))
-		return 1;
-	return 0;
-}
-
 static void tp_perf_event_destroy(struct perf_event *event)
 {
-	perf_trace_disable(event->attr.config);
-	swevent_hlist_put(event);
+	perf_trace_destroy(event);
 }
 
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
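
perf_tp_event() now walks a per-tracepoint hlist of active events under
rcu_read_lock() instead of hashing event ids through the generic swevent
path. The list itself is maintained on the trace-event side
(perf_trace_enable()/perf_trace_disable() in the tracing code, not shown in
this patch); assuming it is published with the usual RCU list primitives,
the counterpart amounts to roughly this sketch (function names hypothetical):

	#include <linux/rculist.h>
	#include <linux/rcupdate.h>

	/* Sketch only: publish an event so the RCU walk in perf_tp_event()
	 * can see it, and unpublish it before the event is torn down. */
	static void example_tp_publish(struct perf_event *event,
				       struct hlist_head *head)
	{
		hlist_add_head_rcu(&event->hlist_entry, head);
	}

	static void example_tp_unpublish(struct perf_event *event)
	{
		hlist_del_rcu(&event->hlist_entry);
		synchronize_rcu();	/* let concurrent walkers drain */
	}
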
@@ -4538,17 +4551,13 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
 	    !capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (perf_trace_enable(event->attr.config, event))
+	err = perf_trace_init(event);
+	if (err)
 		return NULL;
 
 	event->destroy = tp_perf_event_destroy;
-	err = swevent_hlist_get(event);
-	if (err) {
-		perf_trace_disable(event->attr.config);
-		return ERR_PTR(err);
-	}
 
-	return &perf_ops_generic;
+	return &perf_ops_tracepoint;
 }
 
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
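
For context, the events reaching this init path are tracepoint counters
opened from userspace with type PERF_TYPE_TRACEPOINT; a sketch of such an
open follows, where the id conventionally comes from
/sys/kernel/debug/tracing/events/<subsys>/<event>/id and error handling is
elided:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	/* Open a sampling event on one tracepoint for the calling task on
	 * any CPU; the resulting fd is then serviced by perf_ops_tracepoint. */
	static int open_tp_event(unsigned long long tp_id)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_TRACEPOINT;
		attr.size = sizeof(attr);
		attr.config = tp_id;	/* tracepoint id from debugfs */
		attr.sample_period = 1;

		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}
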
@@ -4576,12 +4585,6 @@ static void perf_event_free_filter(struct perf_event *event)
 
 #else
 
-static int perf_tp_event_match(struct perf_event *event,
-				struct perf_sample_data *data)
-{
-	return 1;
-}
-
 static const struct pmu *tp_perf_event_init(struct perf_event *event)
 {
 	return NULL;