@@ -114,16 +114,37 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 		return -EBUSY;
 	}
 
+	/*
+	 * The curr_ret_stack is an index to ftrace return stack of
+	 * current task. Its value should be in [0, FTRACE_RETFUNC_
+	 * DEPTH) when the function graph tracer is used. To support
+	 * filtering out specific functions, it makes the index
+	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
+	 * so when it sees a negative index the ftrace will ignore
+	 * the record. And the index gets recovered when returning
+	 * from the filtered function by adding the FTRACE_NOTRACE_
+	 * DEPTH and then it'll continue to record functions normally.
+	 *
+	 * The curr_ret_stack is initialized to -1 and get increased
+	 * in this function. So it can be less than -1 only if it was
+	 * filtered out via ftrace_graph_notrace_addr() which can be
+	 * set from set_graph_notrace file in debugfs by user.
+	 */
+	if (current->curr_ret_stack < -1)
+		return -EBUSY;
+
 	calltime = trace_clock_local();
 
 	index = ++current->curr_ret_stack;
+	if (ftrace_graph_notrace_addr(func))
+		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 	barrier();
 	current->ret_stack[index].ret = ret;
 	current->ret_stack[index].func = func;
 	current->ret_stack[index].calltime = calltime;
 	current->ret_stack[index].subtime = 0;
 	current->ret_stack[index].fp = frame_pointer;
-	*depth = index;
+	*depth = current->curr_ret_stack;
 
 	return 0;
 }
@@ -137,7 +158,17 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 
 	index = current->curr_ret_stack;
 
-	if (unlikely(index < 0)) {
+	/*
+	 * A negative index here means that it's just returned from a
+	 * notrace'd function. Recover index to get an original
+	 * return address. See ftrace_push_return_trace().
+	 *
+	 * TODO: Need to check whether the stack gets corrupted.
+	 */
+	if (index < 0)
+		index += FTRACE_NOTRACE_DEPTH;
+
+	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 		ftrace_graph_stop();
 		WARN_ON(1);
 		/* Might as well panic, otherwise we have no where to go */
@@ -193,6 +224,15 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	trace.rettime = trace_clock_local();
 	barrier();
 	current->curr_ret_stack--;
+	/*
+	 * The curr_ret_stack can be less than -1 only if it was
+	 * filtered out and it's about to return from the function.
+	 * Recover the index and continue to trace normal functions.
+	 */
+	if (current->curr_ret_stack < -1) {
+		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
+		return ret;
+	}
 
 	/*
 	 * The trace should run after decrementing the ret counter
@@ -259,10 +299,20 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 
 	/* trace it when it is-nested-in or is a function enabled. */
 	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
-	     ftrace_graph_ignore_irqs()) ||
+	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
 	    (max_depth && trace->depth >= max_depth))
 		return 0;
 
+	/*
+	 * Do not trace a function if it's filtered by set_graph_notrace.
+	 * Make the index of ret stack negative to indicate that it should
+	 * ignore further functions. But it needs its own ret stack entry
+	 * to recover the original index in order to continue tracing after
+	 * returning from the function.
+	 */
+	if (ftrace_graph_notrace_addr(trace->func))
+		return 1;
+
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
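
To make the index trick above easier to follow, here is a minimal standalone C sketch (not part of the patch) of the same idea: pushing the stack index far below -1 marks the current entry as filtered, and adding the offset back on the return path recovers the real index. The constants and helpers (RETFUNC_DEPTH, NOTRACE_DEPTH, is_notraced()) are hypothetical stand-ins for FTRACE_RETFUNC_DEPTH, FTRACE_NOTRACE_DEPTH and ftrace_graph_notrace_addr(); only the offset arithmetic mirrors what the kernel code does.

#include <stdio.h>
#include <stdbool.h>

#define RETFUNC_DEPTH	50	/* stand-in for FTRACE_RETFUNC_DEPTH */
#define NOTRACE_DEPTH	10000	/* stand-in for FTRACE_NOTRACE_DEPTH */

static unsigned long ret_stack[RETFUNC_DEPTH];	/* recorded return addresses */
static int curr_ret_stack = -1;			/* starts at -1, as in the kernel */

/* Hypothetical filter: pretend addresses >= 0x1000 were put in set_graph_notrace. */
static bool is_notraced(unsigned long addr)
{
	return addr >= 0x1000;
}

/* Push a return address; make the index negative for a filtered function. */
static int push_ret(unsigned long addr)
{
	int index;

	if (curr_ret_stack < -1)	/* already inside a filtered function */
		return -1;

	index = ++curr_ret_stack;
	if (is_notraced(addr))
		curr_ret_stack -= NOTRACE_DEPTH;	/* hide deeper pushes */

	ret_stack[index] = addr;
	return 0;
}

/* Pop a return address; recover the index if it had been made negative. */
static unsigned long pop_ret(void)
{
	int index = curr_ret_stack;
	unsigned long addr;

	if (index < 0)
		index += NOTRACE_DEPTH;

	addr = ret_stack[index];
	curr_ret_stack--;
	if (curr_ret_stack < -1)
		curr_ret_stack += NOTRACE_DEPTH;	/* resume normal tracing */
	return addr;
}

int main(void)
{
	push_ret(0x10);		/* traced normally */
	push_ret(0x2000);	/* filtered: index goes far below -1 */
	printf("curr_ret_stack while filtered: %d\n", curr_ret_stack);
	printf("popped %#lx\n", pop_ret());
	printf("popped %#lx\n", pop_ret());
	printf("curr_ret_stack after unwinding: %d\n", curr_ret_stack);
	return 0;
}

The point of the design is that the filtered function still takes one ret_stack slot (the slot index is captured before the subtraction), which is what lets the return path find its original return address while everything called beneath it is ignored. In the kernel itself, the filter is populated by the user through the set_graph_notrace file in debugfs, as the patch's comments note.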