@@ -47,12 +47,12 @@
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
+/* ftrace_pid_trace >= 0 will only trace threads with this pid */
+static int ftrace_pid_trace = -1;
+
 /* Quick disabling of function tracer. */
 int function_trace_stop;
 
-/* By default, current tracing type is normal tracing. */
-enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;
-
 /*
  * ftrace_disabled is set when an anomaly is discovered.
  * ftrace_disabled is much stronger than ftrace_enabled.
@@ -61,6 +61,7 @@ static int ftrace_disabled __read_mostly;
 
 static DEFINE_SPINLOCK(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
@@ -70,6 +71,7 @@ static struct ftrace_ops ftrace_list_end __read_mostly =
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
+ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
@@ -86,6 +88,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 	};
 }
 
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+{
+	if (current->pid != ftrace_pid_trace)
+		return;
+
+	ftrace_pid_function(ip, parent_ip);
+}
+
+static void set_ftrace_pid_function(ftrace_func_t func)
+{
+	/* do not set ftrace_pid_function to itself! */
+	if (func != ftrace_pid_func)
+		ftrace_pid_function = func;
+}
+
 /**
  * clear_ftrace_function - reset the ftrace function
  *
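
The self-assignment guard in set_ftrace_pid_function() is what keeps pid filtering re-entrant: once the filter is installed, the live trace function already points at ftrace_pid_func, and saving that pointer into ftrace_pid_function would make the wrapper call itself. A minimal userspace sketch of the same pattern; all names here are hypothetical, not kernel API:

#include <stdio.h>

typedef void (*trace_fn)(unsigned long ip);

static int filter_pid = -1;		/* -1: trace everything */
static int current_pid = 42;		/* stand-in for current->pid */

static void trace_stub(unsigned long ip) { }

static void real_tracer(unsigned long ip)
{
	printf("traced ip=%#lx\n", ip);
}

static trace_fn saved_fn = trace_stub;	/* plays ftrace_pid_function */
static trace_fn active_fn = trace_stub;	/* plays ftrace_trace_function */

static void pid_filter_fn(unsigned long ip)
{
	if (current_pid != filter_pid)
		return;
	saved_fn(ip);			/* forward to the real callback */
}

/* mirrors set_ftrace_pid_function(): never save the wrapper itself */
static void set_saved_fn(trace_fn fn)
{
	if (fn != pid_filter_fn)
		saved_fn = fn;
}

int main(void)
{
	filter_pid = 42;		/* enable filtering for pid 42 */
	set_saved_fn(real_tracer);
	active_fn = pid_filter_fn;

	active_fn(0xdeadbeef);		/* pid matches: traced */

	filter_pid = 7;
	active_fn(0xdeadbeef);		/* pid differs: silently dropped */
	return 0;
}

Without the guard, re-running the setup while the wrapper is active would store pid_filter_fn into saved_fn, turning the hook into an infinite recursion.
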
@@ -96,6 +113,7 @@ void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
 	__ftrace_trace_function = ftrace_stub;
+	ftrace_pid_function = ftrace_stub;
 }
 
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -128,20 +146,26 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	ftrace_list = ops;
 
 	if (ftrace_enabled) {
+		ftrace_func_t func;
+
+		if (ops->next == &ftrace_list_end)
+			func = ops->func;
+		else
+			func = ftrace_list_func;
+
+		if (ftrace_pid_trace >= 0) {
+			set_ftrace_pid_function(func);
+			func = ftrace_pid_func;
+		}
+
 		/*
 		 * For one func, simply call it directly.
 		 * For more than one func, call the chain.
 		 */
 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-		if (ops->next == &ftrace_list_end)
-			ftrace_trace_function = ops->func;
-		else
-			ftrace_trace_function = ftrace_list_func;
+		ftrace_trace_function = func;
 #else
-		if (ops->next == &ftrace_list_end)
-			__ftrace_trace_function = ops->func;
-		else
-			__ftrace_trace_function = ftrace_list_func;
+		__ftrace_trace_function = func;
 		ftrace_trace_function = ftrace_test_stop_func;
 #endif
 	}
@@ -182,8 +206,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 	if (ftrace_enabled) {
 		/* If we only have one func left, then call that directly */
-		if (ftrace_list->next == &ftrace_list_end)
-			ftrace_trace_function = ftrace_list->func;
+		if (ftrace_list->next == &ftrace_list_end) {
+			ftrace_func_t func = ftrace_list->func;
+
+			if (ftrace_pid_trace >= 0) {
+				set_ftrace_pid_function(func);
+				func = ftrace_pid_func;
+			}
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+			ftrace_trace_function = func;
+#else
+			__ftrace_trace_function = func;
+#endif
+		}
 	}
 
  out:
@@ -192,6 +227,38 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	return ret;
 }
 
+static void ftrace_update_pid_func(void)
+{
+	ftrace_func_t func;
+
+	/* should not be called from interrupt context */
+	spin_lock(&ftrace_lock);
+
+	if (ftrace_trace_function == ftrace_stub)
+		goto out;
+
+	func = ftrace_trace_function;
+
+	if (ftrace_pid_trace >= 0) {
+		set_ftrace_pid_function(func);
+		func = ftrace_pid_func;
+	} else {
+		if (func != ftrace_pid_func)
+			goto out;
+
+		/* disabling: restore the saved callback, not the wrapper */
+		func = ftrace_pid_function;
+	}
+
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	ftrace_trace_function = func;
+#else
+	__ftrace_trace_function = func;
+#endif
+
+ out:
+	spin_unlock(&ftrace_lock);
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
@@ -211,6 +278,8 @@ enum {
 	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
 	FTRACE_ENABLE_MCOUNT		= (1 << 3),
 	FTRACE_DISABLE_MCOUNT		= (1 << 4),
+	FTRACE_START_FUNC_RET		= (1 << 5),
+	FTRACE_STOP_FUNC_RET		= (1 << 6),
 };
 
 static int ftrace_filtered;
@@ -395,14 +464,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 	unsigned long ip, fl;
 	unsigned long ftrace_addr;
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
-		ftrace_addr = (unsigned long)ftrace_caller;
-	else
-		ftrace_addr = (unsigned long)ftrace_return_caller;
-#else
 	ftrace_addr = (unsigned long)ftrace_caller;
-#endif
 
 	ip = rec->ip;
 
@@ -535,6 +597,11 @@ static int __ftrace_modify_code(void *data)
 	if (*command & FTRACE_UPDATE_TRACE_FUNC)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
+	if (*command & FTRACE_START_FUNC_RET)
+		ftrace_enable_ftrace_graph_caller();
+	else if (*command & FTRACE_STOP_FUNC_RET)
+		ftrace_disable_ftrace_graph_caller();
+
 	return 0;
 }
 
@@ -545,12 +612,22 @@ static void ftrace_run_update_code(int command)
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static DEFINE_MUTEX(ftrace_start_lock);
 
-static void ftrace_startup(void)
+static void ftrace_startup_enable(int command)
 {
-	int command = 0;
+	if (saved_ftrace_func != ftrace_trace_function) {
+		saved_ftrace_func = ftrace_trace_function;
+		command |= FTRACE_UPDATE_TRACE_FUNC;
+	}
+
+	if (!command || !ftrace_enabled)
+		return;
+
+	ftrace_run_update_code(command);
+}
 
+static void ftrace_startup(int command)
+{
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -558,23 +635,13 @@ static void ftrace_startup(void)
 	ftrace_start_up++;
 	command |= FTRACE_ENABLE_CALLS;
 
-	if (saved_ftrace_func != ftrace_trace_function) {
-		saved_ftrace_func = ftrace_trace_function;
-		command |= FTRACE_UPDATE_TRACE_FUNC;
-	}
+	ftrace_startup_enable(command);
 
-	if (!command || !ftrace_enabled)
-		goto out;
-
-	ftrace_run_update_code(command);
- out:
 	mutex_unlock(&ftrace_start_lock);
 }
 
-static void ftrace_shutdown(void)
+static void ftrace_shutdown(int command)
 {
-	int command = 0;
-
 	if (unlikely(ftrace_disabled))
 		return;
 
@@ -1262,13 +1329,10 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static __init int ftrace_init_debugfs(void)
+static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
 {
-	struct dentry *d_tracer;
 	struct dentry *entry;
 
-	d_tracer = tracing_init_dentry();
-
 	entry = debugfs_create_file("available_filter_functions", 0444,
 				    d_tracer, NULL, &ftrace_avail_fops);
 	if (!entry)
@@ -1295,8 +1359,6 @@ static __init int ftrace_init_debugfs(void)
 	return 0;
 }
 
-fs_initcall(ftrace_init_debugfs);
-
 static int ftrace_convert_nops(struct module *mod,
 			       unsigned long *start,
 			       unsigned long *end)
@@ -1382,12 +1444,101 @@ static int __init ftrace_nodyn_init(void)
 }
 device_initcall(ftrace_nodyn_init);
 
-# define ftrace_startup()		do { } while (0)
-# define ftrace_shutdown()		do { } while (0)
+static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
+static inline void ftrace_startup_enable(int command) { }
+/* Keep as macros so we do not need to define the commands */
+# define ftrace_startup(command)	do { } while (0)
+# define ftrace_shutdown(command)	do { } while (0)
 # define ftrace_startup_sysctl()	do { } while (0)
 # define ftrace_shutdown_sysctl()	do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+static ssize_t
+ftrace_pid_read(struct file *file, char __user *ubuf,
+		       size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	int r;
+
+	if (ftrace_pid_trace >= 0)
+		r = sprintf(buf, "%u\n", ftrace_pid_trace);
+	else
+		r = sprintf(buf, "no pid\n");
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+ftrace_pid_write(struct file *filp, const char __user *ubuf,
+		   size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+	long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = strict_strtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&ftrace_start_lock);
+	if (val < 0) {
+		/* disable pid tracing */
+		if (ftrace_pid_trace < 0)
+			goto out;
+		ftrace_pid_trace = -1;
+
+	} else {
+
+		if (ftrace_pid_trace == val)
+			goto out;
+
+		ftrace_pid_trace = val;
+	}
+
+	/* update the function call */
+	ftrace_update_pid_func();
+	ftrace_startup_enable(0);
+
+ out:
+	mutex_unlock(&ftrace_start_lock);
+
+	return cnt;
+}
+
+static struct file_operations ftrace_pid_fops = {
+	.read = ftrace_pid_read,
+	.write = ftrace_pid_write,
+};
+
+static __init int ftrace_init_debugfs(void)
+{
+	struct dentry *d_tracer;
+	struct dentry *entry;
+
+	d_tracer = tracing_init_dentry();
+	if (!d_tracer)
+		return 0;
+
+	ftrace_init_dyn_debugfs(d_tracer);
+
+	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
+				    NULL, &ftrace_pid_fops);
+	if (!entry)
+		pr_warning("Could not create debugfs "
+			   "'set_ftrace_pid' entry\n");
+	return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
 /**
  * ftrace_kill - kill ftrace
  *
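
From userspace, the new file is driven by writing a decimal pid, or a negative value to go back to tracing all tasks. A minimal sketch, assuming debugfs is mounted at /sys/kernel/debug (the mount point is a convention, not part of this patch) and the function tracer is enabled:

#include <stdio.h>
#include <unistd.h>

#define PID_FILE "/sys/kernel/debug/tracing/set_ftrace_pid"

static int write_pid(long pid)
{
	FILE *f = fopen(PID_FILE, "w");

	if (!f)
		return -1;
	fprintf(f, "%ld\n", pid);	/* pid >= 0 filters, -1 disables */
	return fclose(f);
}

int main(void)
{
	if (write_pid(getpid()))	/* trace only this process */
		perror(PID_FILE);
	/* ... run the workload to be traced ... */
	write_pid(-1);			/* back to tracing all tasks */
	return 0;
}

Reading the file back returns either the current pid or the literal string "no pid", matching ftrace_pid_read() above.
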
@@ -1422,15 +1573,9 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
-		ret = -EBUSY;
-		goto out;
-	}
-
 	ret = __register_ftrace_function(ops);
-	ftrace_startup();
+	ftrace_startup(0);
 
-out:
 	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
@@ -1447,7 +1592,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
-	ftrace_shutdown();
+	ftrace_shutdown(0);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	return ret;
@@ -1496,14 +1641,15 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 	return ret;
 }
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-
-static atomic_t ftrace_retfunc_active;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-/* The callback that hooks the return of a function */
-trace_function_return_t ftrace_function_return =
-			(trace_function_return_t)ftrace_stub;
+static atomic_t ftrace_graph_active;
+
+/* The callbacks that hook a function */
+trace_func_graph_ret_t ftrace_graph_return =
+			(trace_func_graph_ret_t)ftrace_stub;
+trace_func_graph_ent_t ftrace_graph_entry =
+			(trace_func_graph_ent_t)ftrace_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -1549,7 +1695,7 @@ free:
 }
 
 /* Allocate a return stack for each task */
-static int start_return_tracing(void)
+static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
 	int ret;
@@ -1569,52 +1715,46 @@ static int start_return_tracing(void)
 	return ret;
 }
 
-int register_ftrace_return(trace_function_return_t func)
+int register_ftrace_graph(trace_func_graph_ret_t retfunc,
+			trace_func_graph_ent_t entryfunc)
 {
 	int ret = 0;
 
 	mutex_lock(&ftrace_sysctl_lock);
 
-	/*
-	 * Don't launch return tracing if normal function
-	 * tracing is already running.
-	 */
-	if (ftrace_trace_function != ftrace_stub) {
-		ret = -EBUSY;
-		goto out;
-	}
-	atomic_inc(&ftrace_retfunc_active);
-	ret = start_return_tracing();
+	atomic_inc(&ftrace_graph_active);
+	ret = start_graph_tracing();
 	if (ret) {
-		atomic_dec(&ftrace_retfunc_active);
+		atomic_dec(&ftrace_graph_active);
 		goto out;
 	}
-	ftrace_tracing_type = FTRACE_TYPE_RETURN;
-	ftrace_function_return = func;
-	ftrace_startup();
+
+	ftrace_graph_return = retfunc;
+	ftrace_graph_entry = entryfunc;
+
+	ftrace_startup(FTRACE_START_FUNC_RET);
 
 out:
	mutex_unlock(&ftrace_sysctl_lock);
 	return ret;
 }
 
-void unregister_ftrace_return(void)
+void unregister_ftrace_graph(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
-	atomic_dec(&ftrace_retfunc_active);
-	ftrace_function_return = (trace_function_return_t)ftrace_stub;
-	ftrace_shutdown();
-	/* Restore normal tracing type */
-	ftrace_tracing_type = FTRACE_TYPE_ENTER;
+	atomic_dec(&ftrace_graph_active);
+	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
+	ftrace_graph_entry = (trace_func_graph_ent_t)ftrace_stub;
+	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 
 	mutex_unlock(&ftrace_sysctl_lock);
 }
 
 /* Allocate a return stack for newly created task */
-void ftrace_retfunc_init_task(struct task_struct *t)
+void ftrace_graph_init_task(struct task_struct *t)
 {
-	if (atomic_read(&ftrace_retfunc_active)) {
+	if (atomic_read(&ftrace_graph_active)) {
 		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
@@ -1626,7 +1766,7 @@ void ftrace_retfunc_init_task(struct task_struct *t)
 		t->ret_stack = NULL;
 }
 
-void ftrace_retfunc_exit_task(struct task_struct *t)
+void ftrace_graph_exit_task(struct task_struct *t)
 {
 	struct ftrace_ret_stack *ret_stack = t->ret_stack;
 
@@ -1638,5 +1778,3 @@ void ftrace_retfunc_exit_task(struct task_struct *t)
 }
 #endif
 
-
-
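
A caller-side sketch of the renamed registration API. The callback argument types are assumed to match the ftrace.h of this series (entry and return hooks taking struct ftrace_graph_ent * and struct ftrace_graph_ret *); the interface changed again in later kernels, so treat this as illustrative only:

#include <linux/ftrace.h>

/* assumed signature: entry hook gets a struct ftrace_graph_ent * */
static void my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* called on function entry; trace->func is the entered function */
}

/* assumed signature: return hook gets a struct ftrace_graph_ret * */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* called on function exit; rettime - calltime is the duration */
}

static int __init my_graph_tracer_init(void)
{
	/* return callback first, entry callback second, as in this patch;
	 * internally this runs ftrace_startup(FTRACE_START_FUNC_RET) */
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void my_graph_tracer_exit(void)
{
	/* restores both hooks to ftrace_stub and runs
	 * ftrace_shutdown(FTRACE_STOP_FUNC_RET) */
	unregister_ftrace_graph();
}

In this series the registration functions are used by in-tree tracers; whether the symbols are exported to loadable modules is not shown in these hunks.
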