@@ -3869,45 +3869,50 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
 	}
 }
 
-/*
- * Must be called with preemption disabled
- */
-int perf_swevent_get_recursion_context(int **recursion)
+int perf_swevent_get_recursion_context(void)
 {
-	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
+	int rctx;
 
 	if (in_nmi())
-		*recursion = &cpuctx->recursion[3];
+		rctx = 3;
 	else if (in_irq())
-		*recursion = &cpuctx->recursion[2];
+		rctx = 2;
 	else if (in_softirq())
-		*recursion = &cpuctx->recursion[1];
+		rctx = 1;
 	else
-		*recursion = &cpuctx->recursion[0];
+		rctx = 0;
 
-	if (**recursion)
+	if (cpuctx->recursion[rctx]) {
+		put_cpu_var(perf_cpu_context);
 		return -1;
+	}
 
-	(**recursion)++;
+	cpuctx->recursion[rctx]++;
+	barrier();
 
-	return 0;
+	return rctx;
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void perf_swevent_put_recursion_context(int *recursion)
+void perf_swevent_put_recursion_context(int rctx)
 {
-	(*recursion)--;
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	barrier();
+	cpuctx->recursion[rctx]--;
+	put_cpu_var(perf_cpu_context);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context);
 
-static void __do_perf_sw_event(enum perf_type_id type, u32 event_id,
-			       u64 nr, int nmi,
-			       struct perf_sample_data *data,
-			       struct pt_regs *regs)
+static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
+			     u64 nr, int nmi,
+			     struct perf_sample_data *data,
+			     struct pt_regs *regs)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
-	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 
+	cpuctx = &__get_cpu_var(perf_cpu_context);
 	rcu_read_lock();
 	perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
 				 nr, nmi, data, regs);
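
The guard above keeps one recursion counter per execution level (0 = task, 1 = softirq, 2 = hardirq, 3 = NMI) in per-cpu state: get_cpu_var() disables preemption to pin the task to its CPU, the counter for the current level is either claimed or the event is dropped with -1, and barrier() stops the compiler from reordering the protected region across the counter updates. A minimal user-space model of the counting scheme, with the per-cpu handling and barriers deliberately left out:

#include <stdio.h>

/* One counter per level: 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI. */
static int recursion[4];

static int get_recursion_context(int level)
{
	if (recursion[level])
		return -1;	/* already inside an event at this level */
	recursion[level]++;
	return level;
}

static void put_recursion_context(int rctx)
{
	recursion[rctx]--;
}

int main(void)
{
	int rctx = get_recursion_context(0);

	/* A nested attempt at the same level is refused. */
	printf("outer=%d nested=%d\n", rctx, get_recursion_context(0));

	put_recursion_context(rctx);
	return 0;
}

Run, this prints outer=0 nested=-1: the nested attempt is dropped instead of re-entering the event that is still being processed.
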
@@ -3921,34 +3926,22 @@ static void __do_perf_sw_event(enum perf_type_id type, u32 event_id,
 	rcu_read_unlock();
 }
 
-static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
-			     u64 nr, int nmi,
-			     struct perf_sample_data *data,
-			     struct pt_regs *regs)
-{
-	int *recursion;
-
-	preempt_disable();
-
-	if (perf_swevent_get_recursion_context(&recursion))
-		goto out;
-
-	__do_perf_sw_event(type, event_id, nr, nmi, data, regs);
-
-	perf_swevent_put_recursion_context(recursion);
-out:
-	preempt_enable();
-}
-
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 		     struct pt_regs *regs, u64 addr)
 {
 	struct perf_sample_data data;
+	int rctx;
+
+	rctx = perf_swevent_get_recursion_context();
+	if (rctx < 0)
+		return;
 
 	data.addr = addr;
 	data.raw = NULL;
 
 	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
+
+	perf_swevent_put_recursion_context(rctx);
 }
 
 static void perf_swevent_read(struct perf_event *event)
@@ -4172,7 +4165,7 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	regs = task_pt_regs(current);
 
 	/* Trace events already protected against recursion */
-	__do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
+	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 			&data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
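
With the int ** interface gone, a caller that is not already protected brackets its work the same way __perf_sw_event() does above: take the context, bail out on -1, and hand the returned index back when done. A sketch of that calling convention (hypothetical caller, not part of this patch):

static void example_sw_event(u32 event_id, u64 nr, int nmi,
			     struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	rctx = perf_swevent_get_recursion_context();	/* disables preemption */
	if (rctx < 0)
		return;		/* recursion detected: drop the event */

	data.addr = addr;
	data.raw = NULL;

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);

	perf_swevent_put_recursion_context(rctx);	/* re-enables preemption */
}
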