@@ -21,13 +21,13 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	return 0;
 }
 
-static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
+static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
 	unsigned int loops = 0;
 
-	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
+	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
 		entry = ring_buffer_event_data(event);
 
 		/*
@@ -58,7 +58,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
  * Test the trace buffer to see if all the elements
  * are still sane.
  */
-static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
+static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
 {
 	unsigned long flags, cnt = 0;
 	int cpu, ret = 0;
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	local_irq_save(flags);
 	arch_spin_lock(&ftrace_max_lock);
 
-	cnt = ring_buffer_entries(tr->buffer);
+	cnt = ring_buffer_entries(buf->buffer);
 
 	/*
 	 * The trace_test_buffer_cpu runs a while loop to consume all data.
@@ -78,7 +78,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	 */
 	tracing_off();
 	for_each_possible_cpu(cpu) {
-		ret = trace_test_buffer_cpu(tr, cpu);
+		ret = trace_test_buffer_cpu(buf, cpu);
 		if (ret)
 			break;
 	}
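The hunks above convert the two helpers; the hunks below convert their callers. For orientation, a minimal sketch of the layout the new signatures assume, showing only the members these hunks actually touch (the real definitions live in kernel/trace/trace.h with more fields, and the snapshot side is guarded by CONFIG_TRACER_MAX_TRACE):

	struct trace_buffer {
		struct ring_buffer	*buffer;	/* what ring_buffer_consume() and
							 * ring_buffer_entries() read above */
		/* ... */
	};

	struct trace_array {
		struct trace_buffer	trace_buffer;	/* the live trace buffer */
		struct trace_buffer	max_buffer;	/* max-latency snapshot; replaces
							 * the old global max_tr */
		/* ... */
	};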
@@ -355,7 +355,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	msleep(100);
 
 	/* we should have nothing in the buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	if (ret)
 		goto out;
 
@@ -376,7 +376,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 	ftrace_enabled = 0;
 
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	tracing_start();
 
 	/* we should only have one item */
@@ -666,7 +666,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 	ftrace_enabled = 0;
 
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
@@ -737,7 +737,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	 * Simulate the init() callback but we attach a watchdog callback
 	 * to detect and recover from possible hangs
 	 */
-	tracing_reset_online_cpus(tr);
+	tracing_reset_online_cpus(&tr->trace_buffer);
 	set_graph_array(tr);
 	ret = register_ftrace_graph(&trace_graph_return,
 				    &trace_graph_entry_watchdog);
@@ -760,7 +760,7 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	tracing_stop();
 
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 
 	trace->reset(tr);
 	tracing_start();
@@ -815,9 +815,9 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 	if (!ret)
-		ret = trace_test_buffer(&max_tr, &count);
+		ret = trace_test_buffer(&tr->max_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
@@ -877,9 +877,9 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 	if (!ret)
-		ret = trace_test_buffer(&max_tr, &count);
+		ret = trace_test_buffer(&tr->max_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
@@ -943,11 +943,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 	if (ret)
 		goto out;
 
-	ret = trace_test_buffer(&max_tr, &count);
+	ret = trace_test_buffer(&tr->max_buffer, &count);
 	if (ret)
 		goto out;
 
@@ -973,11 +973,11 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 	if (ret)
 		goto out;
 
-	ret = trace_test_buffer(&max_tr, &count);
+	ret = trace_test_buffer(&tr->max_buffer, &count);
 
 	if (!ret && !count) {
 		printk(KERN_CONT ".. no entries found ..");
@@ -1084,10 +1084,10 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
 	/* stop the tracing. */
 	tracing_stop();
 	/* check both trace buffers */
-	ret = trace_test_buffer(tr, NULL);
+	ret = trace_test_buffer(&tr->trace_buffer, NULL);
 	printk("ret = %d\n", ret);
 	if (!ret)
-		ret = trace_test_buffer(&max_tr, &count);
+		ret = trace_test_buffer(&tr->max_buffer, &count);
 
 
 	trace->reset(tr);
@@ -1126,7 +1126,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr
 	/* stop the tracing. */
 	tracing_stop();
 	/* check the trace buffer */
-	ret = trace_test_buffer(tr, &count);
+	ret = trace_test_buffer(&tr->trace_buffer, &count);
 	trace->reset(tr);
 	tracing_start();
 
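Taken together, the caller conversions follow a single pattern: each selftest checks the live buffer first and, for the latency tracers, the max-latency snapshot second, passing the specific trace_buffer instead of the whole trace_array (or the old global max_tr). A hypothetical helper, check_both_buffers(), condenses that pattern for illustration only; it is not part of the patch:

	/* Illustrative only: mirrors the irqsoff/preemptoff call sequence above. */
	static int check_both_buffers(struct trace_array *tr, unsigned long *count)
	{
		int ret;

		/* was: trace_test_buffer(tr, NULL) */
		ret = trace_test_buffer(&tr->trace_buffer, NULL);
		if (!ret)
			/* was: trace_test_buffer(&max_tr, count) */
			ret = trace_test_buffer(&tr->max_buffer, count);

		return ret;
	}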