
Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: Fix trace_print_seq()
  kprobes: No need to unlock kprobe_insn_mutex
  tracing/fastboot: Document the need of initcall_debug
  trace_export: Repair missed fields
  tracing: Fix stack tracer sysctl handling
Linus Torvalds 16 years ago
parent
commit
2a6f86bc5e

+ 1 - 5
kernel/kprobes.c

@@ -237,13 +237,9 @@ static int __kprobes collect_garbage_slots(void)
 {
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos, *next;
-	int safety;
 
 	/* Ensure no-one is preepmted on the garbages */
-	mutex_unlock(&kprobe_insn_mutex);
-	safety = check_safety();
-	mutex_lock(&kprobe_insn_mutex);
-	if (safety != 0)
+	if (check_safety())
 		return -EAGAIN;
 
 	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
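The removed lines dropped kprobe_insn_mutex around the check_safety() call and re-took it afterwards, briefly leaving the insn-slot lists unprotected while the caller believed the mutex was still held; since check_safety() apparently no longer needs the mutex released, the helper is now called directly under the lock and the temporary safety variable goes away. A minimal userspace sketch of the locking pattern, using pthreads and invented names rather than the kernel API:

/*
 * Minimal pthread sketch (invented names, not kernel code) of the fixed
 * pattern: a helper that is called with a mutex held keeps it held,
 * instead of dropping and re-taking it around an internal check.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t slot_mutex = PTHREAD_MUTEX_INITIALIZER;
static int slots_in_use;			/* stands in for the insn-slot lists */

static bool safe_to_collect(void)		/* stands in for check_safety() */
{
	return slots_in_use == 0;
}

/* Called with slot_mutex held, like collect_garbage_slots(). */
static int collect_garbage(void)
{
	/*
	 * Old pattern: unlock slot_mutex, call safe_to_collect(), re-lock,
	 * then test a saved result; another thread could touch the slots in
	 * that window.  New pattern: just test while still holding the lock.
	 */
	if (!safe_to_collect())
		return -1;			/* -EAGAIN in the kernel */
	/* ... reclaim unused slots while still protected ... */
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&slot_mutex);
	printf("collect_garbage() -> %d\n", collect_garbage());
	pthread_mutex_unlock(&slot_mutex);
	return 0;
}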

+ 3 - 3
kernel/trace/Kconfig

@@ -226,13 +226,13 @@ config BOOT_TRACER
 	  the timings of the initcalls and traces key events and the identity
 	  of tasks that can cause boot delays, such as context-switches.
 
-	  Its aim is to be parsed by the /scripts/bootgraph.pl tool to
+	  Its aim is to be parsed by the scripts/bootgraph.pl tool to
 	  produce pretty graphics about boot inefficiencies, giving a visual
 	  representation of the delays during initcalls - but the raw
 	  /debug/tracing/trace text output is readable too.
 
-	  You must pass in ftrace=initcall to the kernel command line
-	  to enable this on bootup.
+	  You must pass in initcall_debug and ftrace=initcall to the kernel
+	  command line to enable this on bootup.
 
 config TRACE_BRANCH_PROFILING
 	bool
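The help-text change fixes the script path (scripts/bootgraph.pl rather than /scripts/bootgraph.pl) and documents that ftrace=initcall alone is not enough: initcall_debug must also be passed so the initcall timings are actually recorded. Illustratively, the additions to an existing kernel command line would be:

<existing kernel command line> initcall_debug ftrace=initcall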

+ 2 - 2
kernel/trace/ftrace.c

@@ -3160,10 +3160,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 	ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
-	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
 		goto out;
 
-	last_ftrace_enabled = ftrace_enabled;
+	last_ftrace_enabled = !!ftrace_enabled;
 
 	if (ftrace_enabled) {
 
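ftrace_enabled is a plain int sysctl, so proc_dointvec() stores whatever integer the user writes, while last_ftrace_enabled tracks a logically boolean on/off state. Without the !! normalization, writing a nonzero value other than the saved one (for example 2 while tracing is already enabled) looks like a state change and re-runs the enable path. The same normalization is applied to the stack tracer sysctl in trace_stack.c below. A minimal userspace sketch with invented names:

/*
 * Minimal userspace sketch of the change-detection bug fixed here.
 * Names are illustrative, not the kernel's.
 */
#include <stdio.h>

static int enabled;		/* like ftrace_enabled: raw sysctl value */
static int last_enabled = 1;	/* like last_ftrace_enabled: always 0 or 1 */

static void sysctl_write(int value)
{
	enabled = value;	/* proc_dointvec() stores the raw integer */

	/* Buggy test: 1 != 2, so this looks like a state change. */
	if (last_enabled != enabled)
		printf("raw compare: would re-run the enable path\n");

	/* Fixed test: !!2 == 1, correctly detected as "no change". */
	if (last_enabled != !!enabled)
		printf("normalized compare: state changed\n");
	else
		printf("normalized compare: no change\n");

	last_enabled = !!enabled;
}

int main(void)
{
	sysctl_write(2);	/* already enabled; user writes "2" */
	return 0;
}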
 
 

+ 3 - 0
kernel/trace/trace_event_types.h

@@ -26,6 +26,9 @@ TRACE_EVENT_FORMAT(funcgraph_exit, TRACE_GRAPH_RET,
 		   ftrace_graph_ret_entry, ignore,
 	TRACE_STRUCT(
 		TRACE_FIELD(unsigned long, ret.func, func)
+		TRACE_FIELD(unsigned long long, ret.calltime, calltime)
+		TRACE_FIELD(unsigned long long, ret.rettime, rettime)
+		TRACE_FIELD(unsigned long, ret.overrun, overrun)
 		TRACE_FIELD(int, ret.depth, depth)
 	),
 	TP_RAW_FMT("<-- %lx (%d)")
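funcgraph_exit records are backed by ftrace_graph_ret_entry, whose payload is a struct ftrace_graph_ret, but the exported field list previously described only func and depth, so calltime, rettime and overrun were missing from the format reported to userspace. For context, the return record at the time looked roughly like this (paraphrased from include/linux/ftrace.h of that era, not copied verbatim):

/* Approximate layout of the function-graph return record in 2009-era
 * kernels; the TRACE_FIELD list above now mirrors all of these members. */
struct ftrace_graph_ret {
	unsigned long		func;		/* traced function */
	unsigned long long	calltime;	/* entry timestamp */
	unsigned long long	rettime;	/* return timestamp */
	unsigned long		overrun;	/* depth-limit overruns */
	int			depth;		/* call depth */
};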

+ 1 - 2
kernel/trace/trace_output.c

@@ -27,8 +27,7 @@ void trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
 	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
 
-	s->buffer[len] = 0;
-	seq_puts(m, s->buffer);
+	seq_write(m, s->buffer, len);
 
 	trace_seq_init(s);
 }
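seq_puts() copies with strlen(), so a trace_seq buffer that begins with or contains a NUL byte is silently truncated, and the old code also had to write a terminator into the buffer; seq_write() takes the length explicitly, so exactly len bytes reach the seq_file regardless of their contents. A userspace analogy using stdio rather than the seq_file API:

/*
 * Userspace analogy (stdio, not the kernel seq_file API): a buffer that
 * contains an embedded NUL byte is truncated by a strlen()-based write
 * but copied in full by a length-based write.
 */
#include <stdio.h>

int main(void)
{
	char buf[] = { 'a', 'b', '\0', 'c', 'd', '\n' };
	size_t len = sizeof(buf);

	fputs(buf, stdout);		/* like seq_puts(): stops at the NUL, prints "ab" */
	fwrite(buf, 1, len, stdout);	/* like seq_write(): emits all six bytes */
	return 0;
}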

+ 2 - 2
kernel/trace/trace_stack.c

@@ -326,10 +326,10 @@ stack_trace_sysctl(struct ctl_table *table, int write,
 	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
 
 	if (ret || !write ||
-	    (last_stack_tracer_enabled == stack_tracer_enabled))
+	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
 		goto out;
 
-	last_stack_tracer_enabled = stack_tracer_enabled;
+	last_stack_tracer_enabled = !!stack_tracer_enabled;
 
 	if (stack_tracer_enabled)
 		register_ftrace_function(&trace_ops);
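Same fix as the ftrace_enable_sysctl() change above, applied to the stack tracer knob: without the !! normalization, writing a nonzero value different from the stored one while the tracer is already on re-runs register_ftrace_function() on an already-registered ops. An illustrative userspace trigger, assuming the knob is exposed as /proc/sys/kernel/stack_tracer_enabled:

/*
 * Illustrative trigger (path assumed to be
 * /proc/sys/kernel/stack_tracer_enabled). Any nonzero value means
 * "enabled"; before this fix, writing e.g. "2" while the tracer was
 * already on looked like a state change to the sysctl handler.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/stack_tracer_enabled", "w");

	if (!f)
		return 1;
	fputs("2\n", f);
	fclose(f);
	return 0;
}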