Эх сурвалжийг харах

ftrace: rename FTRACE to FUNCTION_TRACER

Due to confusion between the ftrace infrastructure and the gcc profiling
tracer "ftrace", this patch renames the config options from FTRACE to
FUNCTION_TRACER.  The other two names that are offspring of FTRACE —
DYNAMIC_FTRACE and FTRACE_MCOUNT_RECORD — will stay the same.

This patch was generated mostly by script, and partially by hand.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Steven Rostedt 16 жил өмнө
parent
commit
606576ce81

+ 1 - 1
Makefile

@@ -536,7 +536,7 @@ KBUILD_CFLAGS	+= -g
 KBUILD_AFLAGS	+= -gdwarf-2
 KBUILD_AFLAGS	+= -gdwarf-2
 endif
 endif
 
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 KBUILD_CFLAGS	+= -pg
 KBUILD_CFLAGS	+= -pg
 endif
 endif
 
 

+ 2 - 2
arch/arm/Kconfig

@@ -16,8 +16,8 @@ config ARM
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
 	select HAVE_KPROBES if (!XIP_KERNEL)
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
-	select HAVE_FTRACE if (!XIP_KERNEL)
-	select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE)
+	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
+	select HAVE_DYNAMIC_FTRACE if (HAVE_FUNCTION_TRACER)
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_GENERIC_DMA_COHERENT
 	help
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  The ARM series is a line of low-power-consumption RISC chip designs

+ 1 - 1
arch/arm/boot/compressed/Makefile

@@ -70,7 +70,7 @@ SEDFLAGS	= s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
 targets       := vmlinux vmlinux.lds piggy.gz piggy.o font.o font.c \
 targets       := vmlinux vmlinux.lds piggy.gz piggy.o font.o font.c \
 		 head.o misc.o $(OBJS)
 		 head.o misc.o $(OBJS)
 
 
-ifeq ($(CONFIG_FTRACE),y)
+ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 endif

+ 1 - 1
arch/arm/include/asm/ftrace.h

@@ -1,7 +1,7 @@
 #ifndef _ASM_ARM_FTRACE
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 
 

+ 1 - 1
arch/arm/kernel/armksyms.c

@@ -183,6 +183,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
 
 
 EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(copy_page);
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(mcount);
 EXPORT_SYMBOL(mcount);
 #endif
 #endif

+ 2 - 2
arch/arm/kernel/entry-common.S

@@ -101,7 +101,7 @@ ENDPROC(ret_from_fork)
 #undef CALL
 #undef CALL
 #define CALL(x) .long x
 #define CALL(x) .long x
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
 ENTRY(mcount)
 	stmdb sp!, {r0-r3, lr}
 	stmdb sp!, {r0-r3, lr}
@@ -149,7 +149,7 @@ trace:
 ftrace_stub:
 ftrace_stub:
 	mov pc, lr
 	mov pc, lr
 
 
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 
 /*=============================================================================
 /*=============================================================================
  * SWI handler
  * SWI handler

+ 1 - 1
arch/powerpc/Kconfig

@@ -112,7 +112,7 @@ config PPC
 	bool
 	bool
 	default y
 	default y
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FTRACE
+	select HAVE_FUNCTION_TRACER
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_IDE
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
 	select HAVE_IOREMAP_PROT

+ 1 - 1
arch/powerpc/Makefile

@@ -122,7 +122,7 @@ KBUILD_CFLAGS		+= -mcpu=powerpc
 endif
 endif
 
 
 # Work around a gcc code-gen bug with -fno-omit-frame-pointer.
 # Work around a gcc code-gen bug with -fno-omit-frame-pointer.
-ifeq ($(CONFIG_FTRACE),y)
+ifeq ($(CONFIG_FUNCTION_TRACER),y)
 KBUILD_CFLAGS		+= -mno-sched-epilog
 KBUILD_CFLAGS		+= -mno-sched-epilog
 endif
 endif
 
 

+ 1 - 1
arch/powerpc/include/asm/ftrace.h

@@ -1,7 +1,7 @@
 #ifndef _ASM_POWERPC_FTRACE
 #ifndef _ASM_POWERPC_FTRACE
 #define _ASM_POWERPC_FTRACE
 #define _ASM_POWERPC_FTRACE
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(_mcount))
 #define MCOUNT_ADDR		((long)(_mcount))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 
 

+ 1 - 1
arch/powerpc/kernel/Makefile

@@ -12,7 +12,7 @@ CFLAGS_prom_init.o      += -fPIC
 CFLAGS_btext.o		+= -fPIC
 CFLAGS_btext.o		+= -fPIC
 endif
 endif
 
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_cputable.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_prom_init.o = -pg -mno-sched-epilog

+ 1 - 1
arch/powerpc/kernel/entry_32.S

@@ -1158,7 +1158,7 @@ machine_check_in_rtas:
 
 
 #endif /* CONFIG_PPC_RTAS */
 #endif /* CONFIG_PPC_RTAS */
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
 _GLOBAL(_mcount)

+ 1 - 1
arch/powerpc/kernel/entry_64.S

@@ -884,7 +884,7 @@ _GLOBAL(enter_prom)
 	mtlr    r0
 	mtlr    r0
         blr
         blr
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
 _GLOBAL(_mcount)

+ 1 - 1
arch/powerpc/kernel/ppc_ksyms.c

@@ -68,7 +68,7 @@ EXPORT_SYMBOL(single_step_exception);
 EXPORT_SYMBOL(sys_sigreturn);
 EXPORT_SYMBOL(sys_sigreturn);
 #endif
 #endif
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(_mcount);
 EXPORT_SYMBOL(_mcount);
 #endif
 #endif
 
 

+ 1 - 1
arch/powerpc/platforms/powermac/Makefile

@@ -1,6 +1,6 @@
 CFLAGS_bootx_init.o  		+= -fPIC
 CFLAGS_bootx_init.o  		+= -fPIC
 
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 # Do not trace early boot code
 CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
 CFLAGS_REMOVE_bootx_init.o = -pg -mno-sched-epilog
 endif
 endif

+ 1 - 1
arch/sparc64/Kconfig

@@ -12,7 +12,7 @@ config SPARC64
 	bool
 	bool
 	default y
 	default y
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FTRACE
+	select HAVE_FUNCTION_TRACER
 	select HAVE_IDE
 	select HAVE_IDE
 	select HAVE_LMB
 	select HAVE_LMB
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KGDB

+ 1 - 1
arch/sparc64/Kconfig.debug

@@ -33,7 +33,7 @@ config DEBUG_PAGEALLOC
 
 
 config MCOUNT
 config MCOUNT
 	bool
 	bool
-	depends on STACK_DEBUG || FTRACE
+	depends on STACK_DEBUG || FUNCTION_TRACER
 	default y
 	default y
 
 
 config FRAME_POINTER
 config FRAME_POINTER

+ 2 - 2
arch/sparc64/lib/mcount.S

@@ -93,7 +93,7 @@ mcount:
 	 nop
 	 nop
 1:
 1:
 #endif
 #endif
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 	mov		%o7, %o0
 	mov		%o7, %o0
 	.globl		mcount_call
 	.globl		mcount_call
@@ -119,7 +119,7 @@ mcount_call:
 	.size		_mcount,.-_mcount
 	.size		_mcount,.-_mcount
 	.size		mcount,.-mcount
 	.size		mcount,.-mcount
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 	.globl		ftrace_stub
 	.globl		ftrace_stub
 	.type		ftrace_stub,#function
 	.type		ftrace_stub,#function
 ftrace_stub:
 ftrace_stub:

+ 1 - 1
arch/x86/Kconfig

@@ -28,7 +28,7 @@ config X86
 	select HAVE_KRETPROBES
 	select HAVE_KRETPROBES
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FTRACE
+	select HAVE_FUNCTION_TRACER
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_KGDB if !X86_VOYAGER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_TRACEHOOK

+ 1 - 1
arch/x86/kernel/Makefile

@@ -6,7 +6,7 @@ extra-y                := head_$(BITS).o head$(BITS).o head.o init_task.o vmlinu
 
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg

+ 2 - 2
arch/x86/kernel/entry_32.S

@@ -1149,7 +1149,7 @@ ENDPROC(xen_failsafe_callback)
 
 
 #endif	/* CONFIG_XEN */
 #endif	/* CONFIG_XEN */
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 
 ENTRY(mcount)
 ENTRY(mcount)
@@ -1204,7 +1204,7 @@ trace:
 	jmp ftrace_stub
 	jmp ftrace_stub
 END(mcount)
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 
 .section .rodata,"a"
 .section .rodata,"a"
 #include "syscall_table_32.S"
 #include "syscall_table_32.S"

+ 2 - 2
arch/x86/kernel/entry_64.S

@@ -61,7 +61,7 @@
 
 
 	.code64
 	.code64
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
 ENTRY(mcount)
 	retq
 	retq
@@ -138,7 +138,7 @@ trace:
 	jmp ftrace_stub
 	jmp ftrace_stub
 END(mcount)
 END(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 
 #ifndef CONFIG_PREEMPT
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
 #define retint_kernel retint_restore_args

+ 1 - 1
arch/x86/kernel/i386_ksyms_32.c

@@ -5,7 +5,7 @@
 #include <asm/desc.h>
 #include <asm/desc.h>
 #include <asm/ftrace.h>
 #include <asm/ftrace.h>
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 EXPORT_SYMBOL(mcount);
 #endif
 #endif

+ 1 - 1
arch/x86/kernel/x8664_ksyms_64.c

@@ -12,7 +12,7 @@
 #include <asm/desc.h>
 #include <asm/desc.h>
 #include <asm/ftrace.h>
 #include <asm/ftrace.h>
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /* mcount is defined in assembly */
 /* mcount is defined in assembly */
 EXPORT_SYMBOL(mcount);
 EXPORT_SYMBOL(mcount);
 #endif
 #endif

+ 1 - 1
arch/x86/xen/Makefile

@@ -1,4 +1,4 @@
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
 # Do not profile debug and lowlevel utilities
 CFLAGS_REMOVE_spinlock.o = -pg
 CFLAGS_REMOVE_spinlock.o = -pg
 CFLAGS_REMOVE_time.o = -pg
 CFLAGS_REMOVE_time.o = -pg

+ 2 - 2
include/asm-x86/ftrace.h

@@ -1,7 +1,7 @@
 #ifndef ASM_X86__FTRACE_H
 #ifndef ASM_X86__FTRACE_H
 #define ASM_X86__FTRACE_H
 #define ASM_X86__FTRACE_H
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
 
@@ -19,6 +19,6 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
 }
 }
 #endif
 #endif
 
 
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 
 #endif /* ASM_X86__FTRACE_H */
 #endif /* ASM_X86__FTRACE_H */

+ 6 - 6
include/linux/ftrace.h

@@ -8,7 +8,7 @@
 #include <linux/types.h>
 #include <linux/types.h>
 #include <linux/kallsyms.h>
 #include <linux/kallsyms.h>
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 
 
 extern int ftrace_enabled;
 extern int ftrace_enabled;
 extern int
 extern int
@@ -36,12 +36,12 @@ void clear_ftrace_function(void);
 
 
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 
-#else /* !CONFIG_FTRACE */
+#else /* !CONFIG_FUNCTION_TRACER */
 # define register_ftrace_function(ops) do { } while (0)
 # define register_ftrace_function(ops) do { } while (0)
 # define unregister_ftrace_function(ops) do { } while (0)
 # define unregister_ftrace_function(ops) do { } while (0)
 # define clear_ftrace_function(ops) do { } while (0)
 # define clear_ftrace_function(ops) do { } while (0)
 static inline void ftrace_kill_atomic(void) { }
 static inline void ftrace_kill_atomic(void) { }
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 # define FTRACE_HASHBITS	10
 # define FTRACE_HASHBITS	10
@@ -101,7 +101,7 @@ void ftrace_kill_atomic(void);
 
 
 static inline void tracer_disable(void)
 static inline void tracer_disable(void)
 {
 {
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 	ftrace_enabled = 0;
 	ftrace_enabled = 0;
 #endif
 #endif
 }
 }
@@ -113,7 +113,7 @@ static inline void tracer_disable(void)
  */
  */
 static inline int __ftrace_enabled_save(void)
 static inline int __ftrace_enabled_save(void)
 {
 {
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 	int saved_ftrace_enabled = ftrace_enabled;
 	int saved_ftrace_enabled = ftrace_enabled;
 	ftrace_enabled = 0;
 	ftrace_enabled = 0;
 	return saved_ftrace_enabled;
 	return saved_ftrace_enabled;
@@ -124,7 +124,7 @@ static inline int __ftrace_enabled_save(void)
 
 
 static inline void __ftrace_enabled_restore(int enabled)
 static inline void __ftrace_enabled_restore(int enabled)
 {
 {
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 	ftrace_enabled = enabled;
 	ftrace_enabled = enabled;
 #endif
 #endif
 }
 }

+ 2 - 2
kernel/Makefile

@@ -13,7 +13,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
 
 
 CFLAGS_REMOVE_sched.o = -mno-spe
 CFLAGS_REMOVE_sched.o = -mno-spe
 
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
 # Do not trace debug files and internal ftrace files
 CFLAGS_REMOVE_lockdep.o = -pg
 CFLAGS_REMOVE_lockdep.o = -pg
 CFLAGS_REMOVE_lockdep_proc.o = -pg
 CFLAGS_REMOVE_lockdep_proc.o = -pg
@@ -86,7 +86,7 @@ obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_FTRACE) += trace/
+obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SMP) += sched_cpupri.o
 
 

+ 1 - 1
kernel/sysctl.c

@@ -464,7 +464,7 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 		.proc_handler	= &proc_dointvec,
 	},
 	},
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 	{
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "ftrace_enabled",
 		.procname	= "ftrace_enabled",

+ 9 - 8
kernel/trace/Kconfig

@@ -1,11 +1,12 @@
 #
 #
-# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
+# Architectures that offer a FUNCTION_TRACER implementation should
+#  select HAVE_FUNCTION_TRACER:
 #
 #
 
 
 config NOP_TRACER
 config NOP_TRACER
 	bool
 	bool
 
 
-config HAVE_FTRACE
+config HAVE_FUNCTION_TRACER
 	bool
 	bool
 	select NOP_TRACER
 	select NOP_TRACER
 
 
@@ -28,9 +29,9 @@ config TRACING
 	select STACKTRACE
 	select STACKTRACE
 	select TRACEPOINTS
 	select TRACEPOINTS
 
 
-config FTRACE
+config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	bool "Kernel Function Tracer"
-	depends on HAVE_FTRACE
+	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
 	select FRAME_POINTER
 	select TRACING
 	select TRACING
@@ -136,9 +137,9 @@ config BOOT_TRACER
 
 
 config STACK_TRACER
 config STACK_TRACER
 	bool "Trace max stack"
 	bool "Trace max stack"
-	depends on HAVE_FTRACE
+	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	depends on DEBUG_KERNEL
-	select FTRACE
+	select FUNCTION_TRACER
 	select STACKTRACE
 	select STACKTRACE
 	help
 	help
 	  This special tracer records the maximum stack footprint of the
 	  This special tracer records the maximum stack footprint of the
@@ -155,7 +156,7 @@ config STACK_TRACER
 
 
 config DYNAMIC_FTRACE
 config DYNAMIC_FTRACE
 	bool "enable/disable ftrace tracepoints dynamically"
 	bool "enable/disable ftrace tracepoints dynamically"
-	depends on FTRACE
+	depends on FUNCTION_TRACER
 	depends on HAVE_DYNAMIC_FTRACE
 	depends on HAVE_DYNAMIC_FTRACE
 	depends on DEBUG_KERNEL
 	depends on DEBUG_KERNEL
 	default y
 	default y
@@ -165,7 +166,7 @@ config DYNAMIC_FTRACE
 	 with a No-Op instruction) as they are called. A table is
 	 with a No-Op instruction) as they are called. A table is
 	 created to dynamically enable them again.
 	 created to dynamically enable them again.
 
 
-	 This way a CONFIG_FTRACE kernel is slightly larger, but otherwise
+	 This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but otherwise
 	 has native performance as long as no tracing is active.
 	 has native performance as long as no tracing is active.
 
 
 	 The changes to the code are done by a kernel thread that
 	 The changes to the code are done by a kernel thread that

+ 3 - 3
kernel/trace/Makefile

@@ -1,7 +1,7 @@
 
 
 # Do not instrument the tracer itself:
 # Do not instrument the tracer itself:
 
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 
 
@@ -10,13 +10,13 @@ CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 obj-y += trace_selftest_dynamic.o
 endif
 endif
 
 
-obj-$(CONFIG_FTRACE) += libftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
 obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
-obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o

+ 1 - 1
kernel/trace/trace.c

@@ -851,7 +851,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	preempt_enable_notrace();
 	preempt_enable_notrace();
 }
 }
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 static void
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 function_trace_call(unsigned long ip, unsigned long parent_ip)
 {
 {

+ 1 - 1
kernel/trace/trace.h

@@ -335,7 +335,7 @@ void update_max_tr_single(struct trace_array *tr,
 
 
 extern cycle_t ftrace_now(int cpu);
 extern cycle_t ftrace_now(int cpu);
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 void tracing_start_function_trace(void);
 void tracing_start_function_trace(void);
 void tracing_stop_function_trace(void);
 void tracing_stop_function_trace(void);
 #else
 #else

+ 2 - 2
kernel/trace/trace_irqsoff.c

@@ -63,7 +63,7 @@ irq_trace(void)
  */
  */
 static __cacheline_aligned_in_smp	unsigned long max_sequence;
 static __cacheline_aligned_in_smp	unsigned long max_sequence;
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /*
 /*
  * irqsoff uses its own tracer function to keep the overhead down:
  * irqsoff uses its own tracer function to keep the overhead down:
  */
  */
@@ -104,7 +104,7 @@ static struct ftrace_ops trace_ops __read_mostly =
 {
 {
 	.func = irqsoff_tracer_call,
 	.func = irqsoff_tracer_call,
 };
 };
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 
 /*
 /*
  * Should this new latency be reported/recorded?
  * Should this new latency be reported/recorded?

+ 2 - 2
kernel/trace/trace_sched_wakeup.c

@@ -31,7 +31,7 @@ static raw_spinlock_t wakeup_lock =
 
 
 static void __wakeup_reset(struct trace_array *tr);
 static void __wakeup_reset(struct trace_array *tr);
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 /*
 /*
  * irqsoff uses its own tracer function to keep the overhead down:
  * irqsoff uses its own tracer function to keep the overhead down:
  */
  */
@@ -96,7 +96,7 @@ static struct ftrace_ops trace_ops __read_mostly =
 {
 {
 	.func = wakeup_tracer_call,
 	.func = wakeup_tracer_call,
 };
 };
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 
 /*
 /*
  * Should this new latency be reported/recorded?
  * Should this new latency be reported/recorded?

+ 2 - 2
kernel/trace/trace_selftest.c

@@ -70,7 +70,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	return ret;
 	return ret;
 }
 }
 
 
-#ifdef CONFIG_FTRACE
+#ifdef CONFIG_FUNCTION_TRACER
 
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 
@@ -226,7 +226,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 
 	return ret;
 	return ret;
 }
 }
-#endif /* CONFIG_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
 
 
 #ifdef CONFIG_IRQSOFF_TRACER
 #ifdef CONFIG_IRQSOFF_TRACER
 int
 int

+ 1 - 1
lib/Makefile

@@ -2,7 +2,7 @@
 # Makefile for some libs needed in the kernel.
 # Makefile for some libs needed in the kernel.
 #
 #
 
 
-ifdef CONFIG_FTRACE
+ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
 endif
 endif