|
@@ -146,7 +146,7 @@ int __init ftrace_dyn_arch_init(void *data)
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
|
-#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
|
+#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
|
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
|
|
|
|
@@ -166,9 +166,10 @@ int ftrace_disable_ftrace_graph_caller(void)
|
|
|
return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
|
|
|
}
|
|
|
|
|
|
-#endif /* !CONFIG_DYNAMIC_FTRACE */
|
|
|
+#endif /* CONFIG_DYNAMIC_FTRACE */
|
|
|
|
|
|
#ifndef KBUILD_MCOUNT_RA_ADDRESS
|
|
|
+
|
|
|
#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
|
|
|
#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
|
|
|
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
|
|
@@ -182,17 +183,17 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
|
|
|
unsigned int code;
|
|
|
int faulted;
|
|
|
|
|
|
- /* in module or kernel? */
|
|
|
- if (self_addr & 0x40000000) {
|
|
|
- /* module: move to the instruction "lui v1, HI_16BIT_OF_MCOUNT" */
|
|
|
- ip = self_addr - 20;
|
|
|
- } else {
|
|
|
- /* kernel: move to the instruction "move ra, at" */
|
|
|
- ip = self_addr - 12;
|
|
|
- }
|
|
|
+ /*
|
|
|
+ * For module, move the ip from calling site of mcount to the
|
|
|
+	 * instruction "lui v1, hi_16bit_of_mcount" (offset is 20), but for
|
|
|
+	 * kernel, move to the instruction "move ra, at" (offset is 12)
|
|
|
+ */
|
|
|
+ ip = self_addr - ((self_addr & 0x40000000) ? 20 : 12);
|
|
|
|
|
|
- /* search the text until finding the non-store instruction or "s{d,w}
|
|
|
- * ra, offset(sp)" instruction */
|
|
|
+ /*
|
|
|
+ * search the text until finding the non-store instruction or "s{d,w}
|
|
|
+ * ra, offset(sp)" instruction
|
|
|
+ */
|
|
|
do {
|
|
|
ip -= 4;
|
|
|
|
|
@@ -201,10 +202,11 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
|
|
|
|
|
|
if (unlikely(faulted))
|
|
|
return 0;
|
|
|
-
|
|
|
- /* If we hit the non-store instruction before finding where the
|
|
|
+ /*
|
|
|
+ * If we hit the non-store instruction before finding where the
|
|
|
* ra is stored, then this is a leaf function and it does not
|
|
|
- * store the ra on the stack. */
|
|
|
+ * store the ra on the stack
|
|
|
+ */
|
|
|
if ((code & S_R_SP) != S_R_SP)
|
|
|
return parent_addr;
|
|
|
|
|
@@ -222,7 +224,7 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-#endif
|
|
|
+#endif /* !KBUILD_MCOUNT_RA_ADDRESS */
|
|
|
|
|
|
/*
|
|
|
* Hook the return address and push it in the stack of return addrs
|
|
@@ -240,7 +242,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
|
|
|
if (unlikely(atomic_read(¤t->tracing_graph_pause)))
|
|
|
return;
|
|
|
|
|
|
- /* "parent" is the stack address saved the return address of the caller
|
|
|
+ /*
|
|
|
+	 * "parent" is the stack address in which the return address of the caller
|
|
|
* of _mcount.
|
|
|
*
|
|
|
* if the gcc < 4.5, a leaf function does not save the return address
|
|
@@ -262,10 +265,11 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
|
|
|
goto out;
|
|
|
#ifndef KBUILD_MCOUNT_RA_ADDRESS
|
|
|
parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
|
|
|
- (unsigned long)parent,
|
|
|
- fp);
|
|
|
- /* If fails when getting the stack address of the non-leaf function's
|
|
|
- * ra, stop function graph tracer and return */
|
|
|
+ (unsigned long)parent, fp);
|
|
|
+ /*
|
|
|
+	 * If we fail to get the stack address of the non-leaf function's
|
|
|
+ * ra, stop function graph tracer and return
|
|
|
+ */
|
|
|
if (parent == 0)
|
|
|
goto out;
|
|
|
#endif
|
|
@@ -292,4 +296,4 @@ out:
|
|
|
ftrace_graph_stop();
|
|
|
WARN_ON(1);
|
|
|
}
|
|
|
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
|
|
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|