
Merge branch 'tip/tracing/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into tracing/core

Ingo Molnar, 15 years ago
parent
commit 281b3714e9

+ 26 - 0
arch/x86/kernel/ftrace.c

@@ -30,14 +30,32 @@

 #ifdef CONFIG_DYNAMIC_FTRACE

+/*
+ * modifying_code is set to notify NMIs that they need to use
+ * memory barriers when entering or exiting. But we don't want
+ * to burden NMIs with unnecessary memory barriers when code
+ * modification is not being done (which is most of the time).
+ *
+ * A mutex is already held when ftrace_arch_code_modify_prepare
+ * and post_process are called. No locks need to be taken here.
+ *
+ * Stop machine will make sure currently running NMIs are done
+ * and new NMIs will see the updated variable before we need
+ * to worry about NMIs doing memory barriers.
+ */
+static int modifying_code __read_mostly;
+static DEFINE_PER_CPU(int, save_modifying_code);
+
 int ftrace_arch_code_modify_prepare(void)
 {
 	set_kernel_text_rw();
+	modifying_code = 1;
 	return 0;
 }

 int ftrace_arch_code_modify_post_process(void)
 {
+	modifying_code = 0;
 	set_kernel_text_ro();
 	return 0;
 }
@@ -149,6 +167,11 @@ static void ftrace_mod_code(void)

 void ftrace_nmi_enter(void)
 {
+	__get_cpu_var(save_modifying_code) = modifying_code;
+
+	if (!__get_cpu_var(save_modifying_code))
+		return;
+
 	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
 		smp_rmb();
 		ftrace_mod_code();
@@ -160,6 +183,9 @@ void ftrace_nmi_enter(void)

 void ftrace_nmi_exit(void)
 {
+	if (!__get_cpu_var(save_modifying_code))
+		return;
+
 	/* Finish all executions before clearing nmi_running */
 	smp_mb();
 	atomic_dec(&nmi_running);

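The ftrace.c change is subtle enough to deserve a sketch: the NMI handler snapshots the global flag on entry into per-CPU state and reuses that snapshot on exit, so both paths stay balanced even if the flag flips while the NMI runs. Below is a minimal, hypothetical user-space analogue (C11 atomics and thread-local storage stand in for the kernel's per-CPU variables; none of these names are kernel API):

#include <stdatomic.h>

static atomic_int modifying_code;              /* set by the writer around code updates */
static _Thread_local int saved_modifying_code; /* stand-in for the per-CPU variable */

static void handler_enter(void)
{
	/* Snapshot the flag once; every later decision uses the snapshot. */
	saved_modifying_code = atomic_load(&modifying_code);
	if (!saved_modifying_code)
		return; /* fast path: no barriers, no accounting */
	/* slow path: take barriers and bump the in-progress counter here */
}

static void handler_exit(void)
{
	/*
	 * Must mirror the decision handler_enter() made, even if
	 * modifying_code changed in between; otherwise the enter/exit
	 * accounting (nmi_running in the kernel) would go unbalanced.
	 */
	if (!saved_modifying_code)
		return;
	/* slow path: finish up and drop the in-progress counter */
}

int main(void)
{
	atomic_store(&modifying_code, 1); /* writer: about to patch code */
	handler_enter();                  /* handler arrives mid-update */
	atomic_store(&modifying_code, 0); /* writer finishes while handler runs */
	handler_exit();                   /* still takes the slow path, staying balanced */
	return 0;
}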
+ 4 - 2
include/linux/syscalls.h

@@ -132,7 +132,8 @@ struct perf_event_attr;

 #define SYSCALL_TRACE_ENTER_EVENT(sname)				\
 	static const struct syscall_metadata __syscall_meta_##sname;	\
-	static struct ftrace_event_call event_enter_##sname;		\
+	static struct ftrace_event_call					\
+	__attribute__((__aligned__(4))) event_enter_##sname;		\
 	static struct trace_event enter_syscall_print_##sname = {	\
 		.trace                  = print_syscall_enter,		\
 	};								\
@@ -153,7 +154,8 @@ struct perf_event_attr;

 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
 	static const struct syscall_metadata __syscall_meta_##sname;	\
-	static struct ftrace_event_call event_exit_##sname;		\
+	static struct ftrace_event_call					\
+	__attribute__((__aligned__(4))) event_exit_##sname;		\
 	static struct trace_event exit_syscall_print_##sname = {	\
 		.trace                  = print_syscall_exit,		\
 	};								\

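Why force a 4-byte alignment here? These event structures are gathered by the linker into a dedicated section and then walked as a dense array; if the compiler pads each object to a larger, optimization-driven alignment (GCC 4.5 can use up to 32 bytes), gaps open between the elements and the walk reads garbage. A stand-alone sketch of the section-as-array technique follows; the struct, macro, and section name are illustrative, not the kernel's:

#include <stdio.h>

struct event_call_like {
	char name[16];
	int id;
};

/*
 * The explicit alignment keeps the compiler from padding each object
 * to a larger alignment, so the section stays a dense array that can
 * be walked with pointer arithmetic.
 */
#define DEFINE_EVENT_LIKE(n, i)						\
	static struct event_call_like event_##n				\
	__attribute__((__aligned__(4), section("my_events"), used))	\
	= { #n, i }

DEFINE_EVENT_LIKE(sys_enter_open, 1);
DEFINE_EVENT_LIKE(sys_exit_open, 2);

/* The linker provides these for any section whose name is a C identifier. */
extern struct event_call_like __start_my_events[];
extern struct event_call_like __stop_my_events[];

int main(void)
{
	struct event_call_like *p;

	for (p = __start_my_events; p < __stop_my_events; p++)
		printf("%s (id %d)\n", p->name, p->id);
	return 0;
}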
+ 2 - 1
include/trace/ftrace.h

@@ -65,7 +65,8 @@
 	};
 #undef DEFINE_EVENT
 #define DEFINE_EVENT(template, name, proto, args)	\
-	static struct ftrace_event_call event_##name
+	static struct ftrace_event_call			\
+	__attribute__((__aligned__(4))) event_##name

 #undef DEFINE_EVENT_PRINT
 #define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\

+ 0 - 9
kernel/trace/Kconfig

@@ -328,15 +328,6 @@ config BRANCH_TRACER

 	  Say N if unsure.

-config POWER_TRACER
-	bool "Trace power consumption behavior"
-	depends on X86
-	select GENERIC_TRACER
-	help
-	  This tracer helps developers to analyze and optimize the kernel's
-	  power management decisions, specifically the C-state and P-state
-	  behavior.
-
 config KSYM_TRACER
 	bool "Trace read and write access on kernel memory locations"
 	depends on HAVE_HW_BREAKPOINT

+ 2 - 1
kernel/trace/trace.h

@@ -792,7 +792,8 @@ extern const char *__stop___trace_bprintk_fmt[];

 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print)		\
-	extern struct ftrace_event_call event_##call;
+	extern struct ftrace_event_call					\
+	__attribute__((__aligned__(4))) event_##call;
 #undef FTRACE_ENTRY_DUP
 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)		\
 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))

+ 1 - 3
kernel/trace/trace_events.c

@@ -60,10 +60,8 @@ int trace_define_field(struct ftrace_event_call *call, const char *type,
 	return 0;

 err:
-	if (field) {
+	if (field)
 		kfree(field->name);
-		kfree(field->type);
-	}
 	kfree(field);

 	return -ENOMEM;

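Judging from the deleted kfree(), this error-path change is an ownership fix: the function duplicates only the name string, while ->type now points at a string it does not own, so freeing field->type would free memory it never allocated. A minimal sketch of that ownership rule, using a hypothetical helper rather than the kernel API:

#include <stdlib.h>
#include <string.h>

struct field {
	char *name;       /* owned: duplicated below, freed on error */
	const char *type; /* borrowed: caller's string, never freed here */
};

static struct field *field_create(const char *name, const char *type)
{
	struct field *f = calloc(1, sizeof(*f));

	if (!f)
		return NULL;
	f->name = strdup(name);
	if (!f->name) {
		free(f); /* free only what this function allocated */
		return NULL;
	}
	f->type = type; /* assigned, not duplicated */
	return f;
}

int main(void)
{
	struct field *f = field_create("common_pid", "int");

	if (f) {
		free(f->name); /* owned */
		free(f);       /* f->type intentionally not freed */
	}
	return 0;
}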
+ 0 - 1
kernel/trace/trace_functions_graph.c

@@ -855,7 +855,6 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 	int i;

 	if (data) {
-		int cpu = iter->cpu;
 		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);

 		/*

+ 2 - 2
kernel/trace/trace_kprobe.c

@@ -651,12 +651,12 @@ static int create_trace_probe(int argc, char **argv)
 			event = strchr(group, '/') + 1;
 			event[-1] = '\0';
 			if (strlen(group) == 0) {
-				pr_info("Group name is not specifiled\n");
+				pr_info("Group name is not specified\n");
 				return -EINVAL;
 			}
 		}
 		if (strlen(event) == 0) {
-			pr_info("Event name is not specifiled\n");
+			pr_info("Event name is not specified\n");
 			return -EINVAL;
 		}
 	}

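Aside from the typo fixes, the surrounding parser is a compact example of splitting a "group/event" token in place: strchr() locates the separator, event points just past it, and writing '\0' through event[-1] terminates the group half inside the same buffer. A stand-alone sketch, assuming (as create_trace_probe() does) that the caller has already verified a '/' is present:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "mygroup/myevent";
	char *group = buf;
	char *event = strchr(group, '/') + 1; /* points just past the '/' */

	event[-1] = '\0'; /* overwrite '/' to terminate the group half */
	printf("group=%s event=%s\n", group, event);
	return 0;
}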
+ 1 - 1
kernel/trace/trace_syscalls.c

@@ -603,7 +603,7 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
 		ret = register_trace_sys_exit(prof_syscall_exit);
 	if (ret) {
 		pr_info("event trace: Could not activate"
-				"syscall entry trace point");
+				"syscall exit trace point");
 	} else {
 		set_bit(num, enabled_prof_exit_syscalls);
 		sys_prof_refcount_exit++;