浏览代码

tracing/fastboot: use sched switch tracer from boot tracer

Impact: enhance boot trace output with scheduling events

Use the sched_switch tracer from the boot tracer.

We can also trace scheduling events inside the initcalls.
Sched tracing is disabled after the initcall has finished and
then reenabled before the next one is started.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Frederic Weisbecker 16 年之前
父节点
当前提交
d7ad44b697
共有 4 个文件被更改,包括 12 次插入和 3 次删除
  1. 2 0
      kernel/trace/trace.c
  2. 1 0
      kernel/trace/trace.h
  3. 6 0
      kernel/trace/trace_boot.c
  4. 3 3
      kernel/trace/trace_sched_switch.c

+ 2 - 0
kernel/trace/trace.c

@@ -3251,6 +3251,8 @@ __init static int tracer_alloc_buffers(void)
 
 
 	register_tracer(&nop_trace);
 #ifdef CONFIG_BOOT_TRACER
+	/* We don't want to launch sched_switch tracer yet */
+	global_trace.ctrl = 0;
 	register_tracer(&boot_tracer);
 	current_trace = &boot_tracer;
 	current_trace->init(&global_trace);

+ 1 - 0
kernel/trace/trace.h

@@ -49,6 +49,7 @@ struct ftrace_entry {
 	unsigned long		parent_ip;
 };
 extern struct tracer boot_tracer;
+extern struct tracer sched_switch_trace; /* Used by the boot tracer */
 
 /*
  * Context switch trace entry - which task (and prio) we switched from/to:

+ 6 - 0
kernel/trace/trace_boot.c

@@ -27,10 +27,14 @@ void start_boot_trace(void)
 
 
 void enable_boot_trace(void)
 {
+	if (pre_initcalls_finished)
+		tracing_start_cmdline_record();
 }
 
 void disable_boot_trace(void)
 {
+	if (pre_initcalls_finished)
+		tracing_stop_cmdline_record();
 }
 
 void reset_boot_trace(struct trace_array *tr)
@@ -45,6 +49,8 @@ static void boot_trace_init(struct trace_array *tr)
 
 
 	for_each_cpu_mask(cpu, cpu_possible_map)
 		tracing_reset(tr, cpu);
+
+	sched_switch_trace.init(tr);
 }
 
 static void boot_trace_ctrl_update(struct trace_array *tr)

+ 3 - 3
kernel/trace/trace_sched_switch.c

@@ -127,6 +127,7 @@ static void tracing_start_sched_switch(void)
 	long ref;
 
 	mutex_lock(&tracepoint_mutex);
+	tracer_enabled = 1;
 	ref = atomic_inc_return(&sched_ref);
 	if (ref == 1)
 		tracing_sched_register();
@@ -138,6 +139,7 @@ static void tracing_stop_sched_switch(void)
 	long ref;
 
 	mutex_lock(&tracepoint_mutex);
+	tracer_enabled = 0;
 	ref = atomic_dec_and_test(&sched_ref);
 	if (ref)
 		tracing_sched_unregister();
@@ -158,12 +160,10 @@ static void start_sched_trace(struct trace_array *tr)
 {
 	sched_switch_reset(tr);
 	tracing_start_cmdline_record();
-	tracer_enabled = 1;
 }
 
 static void stop_sched_trace(struct trace_array *tr)
 {
-	tracer_enabled = 0;
 	tracing_stop_cmdline_record();
 }
 
 
@@ -190,7 +190,7 @@ static void sched_switch_trace_ctrl_update(struct trace_array *tr)
 		stop_sched_trace(tr);
 }
 
-static struct tracer sched_switch_trace __read_mostly =
+struct tracer sched_switch_trace __read_mostly =
 {
 	.name		= "sched_switch",
 	.init		= sched_switch_trace_init,