Browse Source

trace: rename unlikely profiler to branch profiler

Impact: name change of unlikely tracer and profiler

Ingo Molnar suggested changing the config from UNLIKELY_PROFILE
to BRANCH_PROFILING. I never did like the "unlikely" name so I
went one step further, and renamed all the unlikely configurations
to a "BRANCH" variant.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Steven Rostedt 16 years ago
parent
commit
2ed84eeb88

+ 1 - 1
arch/x86/kernel/vsyscall_64.c

@@ -18,7 +18,7 @@
  */
 
 /* Disable profiling for userspace code: */
-#define DISABLE_UNLIKELY_PROFILE
+#define DISABLE_BRANCH_PROFILING
 
 #include <linux/time.h>
 #include <linux/init.h>

+ 1 - 1
arch/x86/vdso/vclock_gettime.c

@@ -10,7 +10,7 @@
  */
 
 /* Disable profiling for userspace code: */
-#define DISABLE_UNLIKELY_PROFILE
+#define DISABLE_BRANCH_PROFILING
 
 #include <linux/kernel.h>
 #include <linux/posix-timers.h>

+ 1 - 1
include/asm-generic/vmlinux.lds.h

@@ -45,7 +45,7 @@
 #define MCOUNT_REC()
 #endif
 
-#ifdef CONFIG_TRACE_UNLIKELY_PROFILE
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
 #define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_likely_profile) = .;   \
 				*(_ftrace_likely)			      \
 				VMLINUX_SYMBOL(__stop_likely_profile) = .;    \

+ 10 - 9
include/linux/compiler.h

@@ -59,26 +59,27 @@ extern void __chk_io_ptr(const volatile void __iomem *);
  * specific implementations come from the above header files
  */
 
-/*
- * Note: DISABLE_UNLIKELY_PROFILE can be used by special lowlevel code
- * to disable branch tracing on a per file basis.
- */
-#if defined(CONFIG_TRACE_UNLIKELY_PROFILE) && !defined(DISABLE_UNLIKELY_PROFILE)
-struct ftrace_likely_data {
+struct ftrace_branch_data {
 	const char *func;
 	const char *file;
 	unsigned line;
 	unsigned long correct;
 	unsigned long incorrect;
 };
-void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect);
+
+/*
+ * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
+ * to disable branch tracing on a per file basis.
+ */
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
+void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 
 #define likely_notrace(x)	__builtin_expect(!!(x), 1)
 #define unlikely_notrace(x)	__builtin_expect(!!(x), 0)
 
 #define likely_check(x) ({						\
 			int ______r;					\
-			static struct ftrace_likely_data		\
+			static struct ftrace_branch_data		\
 				__attribute__((__aligned__(4)))		\
 				__attribute__((section("_ftrace_likely"))) \
 				______f = {				\
@@ -93,7 +94,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect);
 		})
 #define unlikely_check(x) ({						\
 			int ______r;					\
-			static struct ftrace_likely_data		\
+			static struct ftrace_branch_data		\
 				__attribute__((__aligned__(4)))		\
 				__attribute__((section("_ftrace_unlikely"))) \
 				______f = {				\

+ 5 - 5
kernel/trace/Kconfig

@@ -159,7 +159,7 @@ config BOOT_TRACER
 	    selected, because the self-tests are an initcall as well and that
 	    would invalidate the boot trace. )
 
-config TRACE_UNLIKELY_PROFILE
+config TRACE_BRANCH_PROFILING
 	bool "Trace likely/unlikely profiler"
 	depends on DEBUG_KERNEL
 	select TRACING
@@ -175,7 +175,7 @@ config TRACE_UNLIKELY_PROFILE
 
 	  Say N if unsure.
 
-config TRACING_UNLIKELY
+config TRACING_BRANCHES
 	bool
 	help
 	  Selected by tracers that will trace the likely and unlikely
@@ -183,10 +183,10 @@ config TRACING_UNLIKELY
 	  profiled. Profiling the tracing infrastructure can only happen
 	  when the likelys and unlikelys are not being traced.
 
-config UNLIKELY_TRACER
+config BRANCH_TRACER
 	bool "Trace likely/unlikely instances"
-	depends on TRACE_UNLIKELY_PROFILE
-	select TRACING_UNLIKELY
+	depends on TRACE_BRANCH_PROFILING
+	select TRACING_BRANCHES
 	help
 	  This traces the events of likely and unlikely condition
 	  calls in the kernel.  The difference between this and the

+ 3 - 4
kernel/trace/Makefile

@@ -11,9 +11,8 @@ obj-y += trace_selftest_dynamic.o
 endif
 
 # If unlikely tracing is enabled, do not trace these files
-ifdef CONFIG_TRACING_UNLIKELY
-KBUILD_CFLAGS += '-Dlikely(x)=likely_notrace(x)'
-KBUILD_CFLAGS += '-Dunlikely(x)=unlikely_notrace(x)'
+ifdef CONFIG_TRACING_BRANCHES
+KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 endif
 
 obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
@@ -31,6 +30,6 @@ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
 obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
-obj-$(CONFIG_TRACE_UNLIKELY_PROFILE) += trace_unlikely.o
+obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_unlikely.o
 
 libftrace-y := ftrace.o

+ 1 - 1
kernel/trace/trace.c

@@ -258,7 +258,7 @@ static const char *trace_options[] = {
 	"sched-tree",
 	"ftrace_printk",
 	"ftrace_preempt",
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
 	"unlikely",
 #endif
 	NULL

+ 3 - 3
kernel/trace/trace.h

@@ -468,7 +468,7 @@ enum trace_iterator_flags {
 	TRACE_ITER_SCHED_TREE		= 0x200,
 	TRACE_ITER_PRINTK		= 0x400,
 	TRACE_ITER_PREEMPTONLY		= 0x800,
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
 	TRACE_ITER_UNLIKELY		= 0x1000,
 #endif
 };
@@ -530,7 +530,7 @@ static inline void ftrace_preempt_enable(int resched)
 		preempt_enable_notrace();
 }
 
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
 extern int enable_unlikely_tracing(struct trace_array *tr);
 extern void disable_unlikely_tracing(void);
 static inline int trace_unlikely_enable(struct trace_array *tr)
@@ -552,6 +552,6 @@ static inline int trace_unlikely_enable(struct trace_array *tr)
 static inline void trace_unlikely_disable(void)
 {
 }
-#endif /* CONFIG_UNLIKELY_TRACER */
+#endif /* CONFIG_BRANCH_TRACER */
 
 #endif /* _LINUX_KERNEL_TRACE_H */

+ 2 - 2
kernel/trace/trace_unlikely.c

@@ -15,7 +15,7 @@
 #include <asm/local.h>
 #include "trace.h"
 
-#ifdef CONFIG_UNLIKELY_TRACER
+#ifdef CONFIG_BRANCH_TRACER
 
 static int unlikely_tracing_enabled __read_mostly;
 static DEFINE_MUTEX(unlikely_tracing_mutex);
@@ -119,7 +119,7 @@ static inline
 void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 {
 }
-#endif /* CONFIG_UNLIKELY_TRACER */
+#endif /* CONFIG_BRANCH_TRACER */
 
 void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
 {