@@ -44,6 +44,14 @@
 unsigned long __read_mostly	tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly	tracing_thresh;
 
+/* We need to change this state when a selftest is running.
+ * A selftest will look into the ring-buffer to count the
+ * entries inserted during the selftest, but concurrent
+ * insertions into the ring-buffer, such as ftrace_printk, could
+ * occur at the same time, giving false positive or negative results.
+ */
+static atomic_t tracing_selftest_running = ATOMIC_INIT(0);
+
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
 	{ }
@@ -589,6 +597,8 @@ int register_tracer(struct tracer *type)
 		struct tracer *saved_tracer = current_trace;
 		struct trace_array *tr = &global_trace;
 		int i;
+
+		atomic_set(&tracing_selftest_running, 1);
 		/*
 		 * Run a selftest on this tracer.
 		 * Here we reset the trace buffer, and set the current
@@ -603,6 +613,7 @@ int register_tracer(struct tracer *type)
 		/* the test is responsible for initializing and enabling */
 		pr_info("Testing tracer %s: ", type->name);
 		ret = type->selftest(type, tr);
+		atomic_set(&tracing_selftest_running, 0);
 		/* the test is responsible for resetting too */
 		current_trace = saved_tracer;
 		if (ret) {
@@ -3594,7 +3605,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	unsigned long flags, irq_flags;
 	int cpu, len = 0, size, pc;
 
-	if (tracing_disabled)
+	if (tracing_disabled || atomic_read(&tracing_selftest_running))
 		return 0;
 
 	pc = preempt_count();
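
For illustration only, here is a minimal user-space sketch of the guard pattern this patch introduces, using C11 <stdatomic.h> in place of the kernel's atomic_t API. fake_trace_vprintk() and run_selftest() are hypothetical stand-ins, not kernel functions; this is not the kernel implementation itself.

/*
 * Sketch: while a "selftest" flag is set, writer-side inserts are
 * dropped so the selftest's entry count stays deterministic.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int tracing_selftest_running;	/* 0 by default */
static int buffer_entries;	/* entries in a fake ring-buffer */

/* Mirrors the trace_vprintk() change: drop writes during a selftest. */
static int fake_trace_vprintk(const char *msg)
{
	if (atomic_load(&tracing_selftest_running))
		return 0;	/* ignored, so it cannot skew the count */
	buffer_entries++;
	printf("traced: %s\n", msg);
	return 1;
}

/* Mirrors register_tracer(): set the flag, count entries, clear it. */
static void run_selftest(void)
{
	int before, after;

	atomic_store(&tracing_selftest_running, 1);
	before = buffer_entries;
	fake_trace_vprintk("concurrent printk during selftest");	/* dropped */
	buffer_entries++;	/* the one entry the selftest itself inserts */
	after = buffer_entries;
	atomic_store(&tracing_selftest_running, 0);

	printf("selftest counted %d entry(ies), as expected: 1\n",
	       after - before);
}

int main(void)
{
	run_selftest();
	fake_trace_vprintk("printk after selftest");	/* recorded again */
	return 0;
}

Without the flag, the "concurrent" insert would land between the two reads and the selftest would count 2 entries instead of 1, which is exactly the false positive/negative the patch comment describes.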