@@ -111,6 +111,26 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
 #endif
 
+/*
+ * Traverse the ftrace_global_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_global_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
+#define do_for_each_ftrace_op(op, list)			\
+	op = rcu_dereference_raw(list);			\
+	do
+
+/*
+ * Optimized for just a single item in the list (as that is the normal case).
+ */
+#define while_for_each_ftrace_op(op)				\
+	while (likely(op = rcu_dereference_raw((op)->next)) &&	\
+	       unlikely((op) != &ftrace_list_end))
+
 /**
  * ftrace_nr_registered_ops - return number of ops registered
  *
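For illustration (this expansion is not part of the patch itself): the two
macros are meant to be used as a bracketing pair, so a call site such as the
one introduced in ftrace_global_list_func() below preprocesses into an
ordinary do/while loop:

	op = rcu_dereference_raw(ftrace_global_list);
	do {
		op->func(ip, parent_ip, op, regs);
	} while (likely(op = rcu_dereference_raw((op)->next)) &&
		 unlikely((op) != &ftrace_list_end));

The assignment inside the while condition doubles as the NULL check; a
single-entry list runs the body once and fails the condition immediately,
which is the "normal case" the second macro's comment refers to.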
@@ -132,29 +152,21 @@ int ftrace_nr_registered_ops(void)
 	return cnt;
 }
 
-/*
- * Traverse the ftrace_global_list, invoking all entries. The reason that we
- * can use rcu_dereference_raw() is that elements removed from this list
- * are simply leaked, so there is no need to interact with a grace-period
- * mechanism. The rcu_dereference_raw() calls are needed to handle
- * concurrent insertions into the ftrace_global_list.
- *
- * Silly Alpha and silly pointer-speculation compiler optimizations!
- */
 static void
 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
 			struct ftrace_ops *op, struct pt_regs *regs)
 {
-	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
+	int bit;
+
+	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
+	if (bit < 0)
 		return;
 
-	trace_recursion_set(TRACE_GLOBAL_BIT);
-	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
-	while (op != &ftrace_list_end) {
+	do_for_each_ftrace_op(op, ftrace_global_list) {
 		op->func(ip, parent_ip, op, regs);
-		op = rcu_dereference_raw(op->next); /*see above*/
-	};
-	trace_recursion_clear(TRACE_GLOBAL_BIT);
+	} while_for_each_ftrace_op(op);
+
+	trace_clear_recursion(bit);
 }
 
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
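The patch also replaces the single TRACE_GLOBAL_BIT with a range of
per-context recursion bits. As a rough sketch of the assumed semantics
(simplified from the helpers in kernel/trace/trace.h, not their exact code),
the pair used above behaves roughly like this:

	/*
	 * Reserve the recursion bit for the current context (normal,
	 * softirq, irq or NMI) within [start, max]; return -1 if it is
	 * already set, meaning we recursed into the tracer. Range
	 * checking against max is elided in this sketch.
	 */
	static inline int trace_test_and_set_recursion(int start, int max)
	{
		unsigned int val = current->trace_recursion;
		int bit = start + trace_get_context_bit();

		if (unlikely(val & (1 << bit)))
			return -1;
		current->trace_recursion = val | (1 << bit);
		return bit;
	}

	/* Release the bit reserved above. */
	static inline void trace_clear_recursion(int bit)
	{
		current->trace_recursion &= ~(1 << bit);
	}

Keeping one bit per interrupt context lets an NMI legitimately re-enter a
tracer that was already running in process context, while still catching
true recursion within any single context.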
@@ -221,10 +233,24 @@ static void update_global_ops(void)
 	 * registered callers.
 	 */
 	if (ftrace_global_list == &ftrace_list_end ||
-	    ftrace_global_list->next == &ftrace_list_end)
+	    ftrace_global_list->next == &ftrace_list_end) {
 		func = ftrace_global_list->func;
-	else
+		/*
+		 * As we are calling the function directly,
+		 * if it does not have recursion protection,
+		 * the function_trace_op needs to be updated
+		 * accordingly.
+		 */
+		if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
+			global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+		else
+			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
+	} else {
 		func = ftrace_global_list_func;
+		/* The list has its own recursion protection. */
+		global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
+	}
+
 
 	/* If we filter on pids, update to use the pid function */
 	if (!list_empty(&ftrace_pids)) {
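To connect the new flag handling to its users, here is a hypothetical
caller-side sketch (the names my_trace_func and my_ops are invented for
illustration) of an ops that does its own recursion checking and may
therefore be called directly:

	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* callback body that guards against its own recursion */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_trace_func,
		.flags	= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
	};

When such an ops is the only entry on ftrace_global_list, update_global_ops()
can now propagate its RECURSION_SAFE flag into global_ops instead of assuming
the direct call still needs the list function's protection.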
@@ -337,7 +363,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
 		return -EINVAL;
 
-#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 	/*
 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
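As an illustration of the rule in that comment (the ops and callback names
are invented), a tracer that wants registers when available, but must still
load on arches without CONFIG_DYNAMIC_FTRACE_WITH_REGS, registers with the
weaker flag:

	static struct ftrace_ops regs_ops = {
		.func	= my_regs_func,
		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
	};

With FTRACE_OPS_FL_SAVE_REGS alone, __register_ftrace_function() rejects the
ops on such arches with -EINVAL; with the _IF_SUPPORTED variant registration
succeeds and the callback simply sees regs == NULL.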
@@ -4090,14 +4116,11 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 	 */
 	preempt_disable_notrace();
 	trace_recursion_set(TRACE_CONTROL_BIT);
-	op = rcu_dereference_raw(ftrace_control_list);
-	while (op != &ftrace_list_end) {
+	do_for_each_ftrace_op(op, ftrace_control_list) {
 		if (!ftrace_function_local_disabled(op) &&
 		    ftrace_ops_test(op, ip))
 			op->func(ip, parent_ip, op, regs);
-
-		op = rcu_dereference_raw(op->next);
-	};
+	} while_for_each_ftrace_op(op);
 	trace_recursion_clear(TRACE_CONTROL_BIT);
 	preempt_enable_notrace();
 }
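The preempt_disable_notrace() bracket above is what makes it safe to walk a
list whose entries may be dynamically allocated. A simplified sketch (assumed,
not taken from the patch) of the other side of that protocol, which may only
free an ops once all such preempt-disabled walkers have finished:

	static void example_release_ops(struct ftrace_ops *ops)
	{
		unregister_ftrace_function(ops); /* unlink from the list */
		synchronize_sched();		 /* wait out every walker */
		kfree(ops);			 /* now safe to free */
	}

The same requirement is spelled out by the comment inside
__ftrace_ops_list_func() in the next hunk.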
@@ -4112,27 +4135,26 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
 	struct ftrace_ops *op;
+	int bit;
 
 	if (function_trace_stop)
 		return;
 
-	if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
+	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+	if (bit < 0)
 		return;
 
-	trace_recursion_set(TRACE_INTERNAL_BIT);
 	/*
 	 * Some of the ops may be dynamically allocated,
 	 * they must be freed after a synchronize_sched().
 	 */
 	preempt_disable_notrace();
-	op = rcu_dereference_raw(ftrace_ops_list);
-	while (op != &ftrace_list_end) {
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
 		if (ftrace_ops_test(op, ip))
 			op->func(ip, parent_ip, op, regs);
-		op = rcu_dereference_raw(op->next);
-	};
+	} while_for_each_ftrace_op(op);
 	preempt_enable_notrace();
-	trace_recursion_clear(TRACE_INTERNAL_BIT);
+	trace_clear_recursion(bit);
 }
 
 /*
@@ -4143,8 +4165,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
  * Archs are to support both the regs and ftrace_ops at the same time.
  * If they support ftrace_ops, it is assumed they support regs.
  * If call backs want to use regs, they must either check for regs
- * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS.
- * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved.
+ * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
+ * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
 */
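Per the rule this comment states, a portable callback checks regs for NULL
before touching it; a minimal hypothetical sketch (names invented):

	static void my_regs_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs)
	{
		if (regs)
			pr_debug("traced %pS with saved regs\n", (void *)ip);
		else
			pr_debug("traced %pS, no regs available\n", (void *)ip);
	}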