@@ -64,12 +64,19 @@
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+static struct ftrace_ops ftrace_list_end __read_mostly = {
+	.func		= ftrace_stub,
+};
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
 /* Quick disabling of function tracer. */
-int function_trace_stop;
+int function_trace_stop __read_mostly;
+
+/* Current function tracing op */
+struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 
 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -86,10 +93,6 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_MUTEX(ftrace_lock);
 
-static struct ftrace_ops ftrace_list_end __read_mostly = {
-	.func		= ftrace_stub,
-};
-
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
@@ -100,8 +103,14 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op);
+#else
+/* See comment below, where ftrace_ops_list_func is defined */
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
+#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+#endif
 
 /*
  * Traverse the ftrace_global_list, invoking all entries. The reason that we
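
The #else branch above is worth a closer look: ftrace_ops_no_ops is defined with exactly the two parameters that an mcount stub on a non-ARCH_SUPPORTS_FTRACE_OPS arch actually passes, and the cast to ftrace_func_t happens only where the pointer is stored, never at a C call site. A minimal sketch of the pattern, with made-up names (func3_t, takes_two, third_arg_worker are not kernel identifiers):

	/* Illustrative only: mirrors the #else branch with toy names. */
	typedef void (*func3_t)(unsigned long ip, unsigned long pip, void *op);

	static void third_arg_worker(unsigned long ip, unsigned long pip, void *op);

	/* Defined with the two arguments its only caller really passes. */
	static void takes_two(unsigned long ip, unsigned long pip)
	{
		third_arg_worker(ip, pip, NULL);	/* third value supplied here */
	}

	/* The cast applies to storage only; C code never calls through
	 * func3_t with a missing argument, so there is no undefined
	 * behavior on the C side. */
	static func3_t stored_func = (func3_t)takes_two;
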
@@ -112,29 +121,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
-static void ftrace_global_list_func(unsigned long ip,
-				    unsigned long parent_ip)
+static void
+ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_ops *op)
 {
-	struct ftrace_ops *op;
-
 	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
 		return;
 
 	trace_recursion_set(TRACE_GLOBAL_BIT);
 	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
-		op->func(ip, parent_ip);
+		op->func(ip, parent_ip, op);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }
 
-static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+			    struct ftrace_ops *op)
 {
 	if (!test_tsk_trace_trace(current))
 		return;
 
-	ftrace_pid_function(ip, parent_ip);
+	ftrace_pid_function(ip, parent_ip, op);
 }
 
 static void set_ftrace_pid_function(ftrace_func_t func)
@@ -163,12 +172,13 @@ void clear_ftrace_function(void)
  * For those archs that do not test ftrace_trace_stop in their
  * mcount call site, we need to do it from C.
  */
-static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip,
+				  struct ftrace_ops *op)
 {
 	if (function_trace_stop)
 		return;
 
-	__ftrace_trace_function(ip, parent_ip);
+	__ftrace_trace_function(ip, parent_ip, op);
 }
 #endif
 
@@ -230,15 +240,24 @@ static void update_ftrace_function(void)
 
 	/*
 	 * If we are at the end of the list and this ops is
-	 * not dynamic, then have the mcount trampoline call
-	 * the function directly
+	 * not dynamic and the arch supports passing ops, then have the
+	 * mcount trampoline call the function directly.
 	 */
 	if (ftrace_ops_list == &ftrace_list_end ||
 	    (ftrace_ops_list->next == &ftrace_list_end &&
-	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
+	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+	     ARCH_SUPPORTS_FTRACE_OPS)) {
+		/* Set the ftrace_ops that the arch callback uses */
+		if (ftrace_ops_list == &global_ops)
+			function_trace_op = ftrace_global_list;
+		else
+			function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
-	else
+	} else {
+		/* Just use the default ftrace_ops */
+		function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
+	}
 
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
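
With function_trace_op maintained here, a trampoline on an arch that defines ARCH_SUPPORTS_FTRACE_OPS can hand the registered ops straight to the callback without walking the list. Roughly, the trampoline assembly (for example x86's ftrace_caller) does the equivalent of the C sketch below; sketch_ftrace_caller is an illustrative name, and the real code is assembly:

	/* C rendering of the trampoline's job; illustrative only. */
	static void sketch_ftrace_caller(unsigned long ip, unsigned long parent_ip)
	{
		/* both variables are the ones update_ftrace_function() sets */
		ftrace_trace_function(ip, parent_ip, function_trace_op);
	}
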
@@ -773,7 +792,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 }
 
 static void
-function_profile_call(unsigned long ip, unsigned long parent_ip)
+function_profile_call(unsigned long ip, unsigned long parent_ip,
+		      struct ftrace_ops *ops)
 {
 	struct ftrace_profile_stat *stat;
 	struct ftrace_profile *rec;
@@ -803,7 +823,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-	function_profile_call(trace->func, 0);
+	function_profile_call(trace->func, 0, NULL);
 	return 1;
 }
 
@@ -2790,8 +2810,8 @@ static int __init ftrace_mod_cmd_init(void)
 }
 device_initcall(ftrace_mod_cmd_init);
 
-static void
-function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
+static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
+				      struct ftrace_ops *op)
 {
 	struct ftrace_func_probe *entry;
 	struct hlist_head *hhd;
@@ -3942,10 +3962,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
-ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_ops *op)
 {
-	struct ftrace_ops *op;
-
 	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
 		return;
 
@@ -3959,7 +3978,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
 	while (op != &ftrace_list_end) {
 		if (!ftrace_function_local_disabled(op) &&
 		    ftrace_ops_test(op, ip))
-			op->func(ip, parent_ip);
+			op->func(ip, parent_ip, op);
 
 		op = rcu_dereference_raw(op->next);
 	};
@@ -3971,8 +3990,9 @@ static struct ftrace_ops control_ops = {
 	.func = ftrace_ops_control_func,
 };
 
-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
+static inline void
+__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+		       struct ftrace_ops *ignored)
 {
 	struct ftrace_ops *op;
 
@@ -3988,13 +4008,32 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 	op = rcu_dereference_raw(ftrace_ops_list);
 	while (op != &ftrace_list_end) {
 		if (ftrace_ops_test(op, ip))
-			op->func(ip, parent_ip);
+			op->func(ip, parent_ip, op);
 		op = rcu_dereference_raw(op->next);
 	};
 	preempt_enable_notrace();
 	trace_recursion_clear(TRACE_INTERNAL_BIT);
 }
 
+/*
+ * Some archs only support passing ip and parent_ip. Even though
+ * the list function ignores the op parameter, we do not want any
+ * C side effects, where a function is called without the caller
+ * sending a third parameter.
+ */
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op)
+{
+	__ftrace_ops_list_func(ip, parent_ip, NULL);
+}
+#else
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
+{
+	__ftrace_ops_list_func(ip, parent_ip, NULL);
+}
+#endif
+
 static void clear_ftrace_swapper(void)
 {
 	struct task_struct *p;
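
The payoff of the new third parameter: a callback can now get back to the ftrace_ops it was registered with, and through it to per-tracer state, instead of relying on globals. A hypothetical user of the new signature (my_tracer and my_trace_func are illustrative names, not part of this patch):

	/* Per-tracer state reached via the op argument. */
	struct my_tracer {
		struct ftrace_ops	ops;
		atomic_t		hits;
	};

	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op)
	{
		struct my_tracer *t = container_of(op, struct my_tracer, ops);

		atomic_inc(&t->hits);
	}

	static struct my_tracer my_tracer = {
		.ops = { .func = my_trace_func },
	};

	/* enabled with register_ftrace_function(&my_tracer.ops) */
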