@@ -0,0 +1,138 @@
+/*
+ * Infrastructure for profiling code inserted by 'gcc -pg'.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
+ *
+ * Originally ported from the -rt patch by:
+ * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Based on code in the latency_tracer, that is:
+ *
+ * Copyright (C) 2004-2006 Ingo Molnar
+ * Copyright (C) 2004 William Lee Irwin III
+ */
+
+#include <linux/module.h>
+#include <linux/ftrace.h>
+
+static DEFINE_SPINLOCK(ftrace_func_lock);
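+
+/*
+ * The callback list is terminated by the ftrace_list_end sentinel
+ * rather than by NULL, so list walkers never need a NULL check:
+ * the stub entry at the end simply returns.
+ */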
+static struct ftrace_ops ftrace_list_end __read_mostly =
+{
+	.func = ftrace_stub,
+};
+
+static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
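+/*
+ * ftrace_trace_function is the hook the architecture's mcount stub
+ * calls on every traced function entry.  The register/unregister
+ * code below switches it between ftrace_stub (tracing off), a lone
+ * callback (one user), and ftrace_list_func (several users).
+ */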
+ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+
+/* mcount is defined per arch in assembly */
+EXPORT_SYMBOL(mcount);
+
+notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+{
+	struct ftrace_ops *op = ftrace_list;
+
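+	/*
+	 * The smp_wmb() in register_ftrace_function() makes the
+	 * ops->next pointer visible before the ops itself is published
+	 * on ftrace_list; the data-dependency barriers below pair with
+	 * it (only Alpha needs a real barrier for a dependent load).
+	 */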
+	/* in case someone actually ports this to alpha! */
+	read_barrier_depends();
+
+	while (op != &ftrace_list_end) {
+		/* silly alpha */
+		read_barrier_depends();
+		op->func(ip, parent_ip);
+		op = op->next;
+	}
+}
+
+/**
+ * register_ftrace_function - register a function for profiling
+ * @ops: ops structure that holds the function for profiling.
+ *
+ * Register a function to be called by all functions in the
+ * kernel.
+ *
+ * Note: @ops->func and all the functions it calls must be labeled
+ *       with "notrace", otherwise it will go into a
+ *       recursive loop.
+ */
+int register_ftrace_function(struct ftrace_ops *ops)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ftrace_func_lock, flags);
+	ops->next = ftrace_list;
+	/*
+	 * We are entering ops into the ftrace_list but another
+	 * CPU might be walking that list. We need to make sure
+	 * the ops->next pointer is valid before another CPU sees
+	 * the ops pointer included into the ftrace_list.
+	 */
+	smp_wmb();
+	ftrace_list = ops;
+	/*
+	 * For one func, simply call it directly.
+	 * For more than one func, call the chain.
+	 */
+	if (ops->next == &ftrace_list_end)
+		ftrace_trace_function = ops->func;
+	else
+		ftrace_trace_function = ftrace_list_func;
+	spin_unlock_irqrestore(&ftrace_func_lock, flags);
+
+	return 0;
+}
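+
+/*
+ * Usage sketch (illustrative only; my_trace_func and my_ops are
+ * made-up names, not part of this file).  A user supplies a notrace
+ * callback and registers it:
+ *
+ *	static notrace void my_trace_func(unsigned long ip,
+ *					  unsigned long parent_ip)
+ *	{
+ *		...	(may only call notrace functions)
+ *	}
+ *
+ *	static struct ftrace_ops my_ops __read_mostly = {
+ *		.func = my_trace_func,
+ *	};
+ *
+ *	register_ftrace_function(&my_ops);
+ *
+ * and later removes it with unregister_ftrace_function(&my_ops).
+ */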
+
+/**
+ * unregister_ftrace_function - unregister a function for profiling.
+ * @ops: ops structure that holds the function to unregister
+ *
+ * Unregister a function that was added to be called by ftrace profiling.
+ */
+int unregister_ftrace_function(struct ftrace_ops *ops)
+{
+	unsigned long flags;
+	struct ftrace_ops **p;
+	int ret = 0;
+
+	spin_lock_irqsave(&ftrace_func_lock, flags);
+
+	/*
+	 * If we are removing the only registered function, then the
+	 * ftrace pointer is pointing directly to that function.
+	 */
+	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
+		ftrace_trace_function = ftrace_stub;
+		ftrace_list = &ftrace_list_end;
+		goto out;
+	}
+
+	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
+		if (*p == ops)
+			break;
+
+	if (*p != ops) {
+		ret = -1;
+		goto out;
+	}
+
+	*p = (*p)->next;
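+	/*
+	 * Note: nothing here waits for CPUs that may still be inside
+	 * ftrace_list_func() holding a reference to ops; the unlinked
+	 * entry can still be called for a brief window after this.
+	 */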
+
+	/* If we only have one func left, then call that directly */
+	if (ftrace_list->next == &ftrace_list_end)
+		ftrace_trace_function = ftrace_list->func;
+
+ out:
+	spin_unlock_irqrestore(&ftrace_func_lock, flags);
+
+	return ret;
+}
+
+/**
+ * clear_ftrace_function - reset the ftrace function
+ *
+ * This resets the ftrace function to the stub and in essence stops
+ * tracing.  Other CPUs may keep calling the old function for a
+ * short time after this returns.
+ */
+void clear_ftrace_function(void)
+{
+	ftrace_trace_function = ftrace_stub;
+}