@@ -165,25 +165,8 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
-/*
- * The hash lock is only needed when the recording of the mcount
- * callers are dynamic. That is, by the caller themselves and
- * not recorded via the compilation.
- */
-static DEFINE_SPINLOCK(ftrace_hash_lock);
-#define ftrace_hash_lock(flags)	  spin_lock_irqsave(&ftrace_hash_lock, flags)
-#define ftrace_hash_unlock(flags) \
-			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
-static void ftrace_release_hash(unsigned long start, unsigned long end);
-#else
-/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
-#define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
-#define ftrace_hash_unlock(flags) do { } while(0)
-static inline void ftrace_release_hash(unsigned long start, unsigned long end)
-{
-}
+# error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
 
 /*
@@ -194,8 +177,6 @@ static inline void ftrace_release_hash(unsigned long start, unsigned long end)
  */
 static unsigned long mcount_addr = MCOUNT_ADDR;
 
-static struct task_struct *ftraced_task;
-
 enum {
 	FTRACE_ENABLE_CALLS		= (1 << 0),
 	FTRACE_DISABLE_CALLS		= (1 << 1),
@@ -212,7 +193,6 @@ static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
 
 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
 
-static DEFINE_MUTEX(ftraced_lock);
 static DEFINE_MUTEX(ftrace_regex_lock);
 
 struct ftrace_page {
@@ -230,10 +210,6 @@ struct ftrace_page {
 static struct ftrace_page	*ftrace_pages_start;
 static struct ftrace_page	*ftrace_pages;
 
-static int ftraced_trigger;
-static int ftraced_suspend;
-static int ftraced_stop;
-
 static int ftrace_record_suspend;
 
 static struct dyn_ftrace *ftrace_free_records;
@@ -398,7 +374,6 @@ static void
 ftrace_record_ip(unsigned long ip)
 {
 	struct dyn_ftrace *node;
-	unsigned long flags;
 	unsigned long key;
 	int resched;
 	int cpu;
@@ -430,24 +405,18 @@ ftrace_record_ip(unsigned long ip)
 	if (ftrace_ip_in_hash(ip, key))
 		goto out;
 
-	ftrace_hash_lock(flags);
-
 	/* This ip may have hit the hash before the lock */
 	if (ftrace_ip_in_hash(ip, key))
-		goto out_unlock;
+		goto out;
 
 	node = ftrace_alloc_dyn_node(ip);
 	if (!node)
-		goto out_unlock;
+		goto out;
 
 	node->ip = ip;
 
 	ftrace_add_hash(node, key);
 
-	ftraced_trigger = 1;
-
- out_unlock:
-	ftrace_hash_unlock(flags);
  out:
 	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
 
@@ -647,7 +616,7 @@ ftrace_code_disable(struct dyn_ftrace *rec)
 	return 1;
 }
 
-static int __ftrace_update_code(void *ignore);
+static int ftrace_update_code(void *ignore);
 
 static int __ftrace_modify_code(void *data)
 {
@@ -659,7 +628,7 @@ static int __ftrace_modify_code(void *data)
 		 * Update any recorded ips now that we have the
 		 * machine stopped
 		 */
-		__ftrace_update_code(NULL);
+		ftrace_update_code(NULL);
 		ftrace_replace_code(1);
 		tracing_on = 1;
 	} else if (*command & FTRACE_DISABLE_CALLS) {
@@ -686,26 +655,9 @@ static void ftrace_run_update_code(int command)
 	stop_machine(__ftrace_modify_code, &command, NULL);
 }
 
-void ftrace_disable_daemon(void)
-{
-	/* Stop the daemon from calling kstop_machine */
-	mutex_lock(&ftraced_lock);
-	ftraced_stop = 1;
-	mutex_unlock(&ftraced_lock);
-
-	ftrace_force_update();
-}
-
-void ftrace_enable_daemon(void)
-{
-	mutex_lock(&ftraced_lock);
-	ftraced_stop = 0;
-	mutex_unlock(&ftraced_lock);
-
-	ftrace_force_update();
-}
-
 static ftrace_func_t saved_ftrace_func;
+static int ftrace_start;
+static DEFINE_MUTEX(ftrace_start_lock);
 
 static void ftrace_startup(void)
 {
@@ -714,9 +666,9 @@ static void ftrace_startup(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	ftraced_suspend++;
-	if (ftraced_suspend == 1)
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start++;
+	if (ftrace_start == 1)
 		command |= FTRACE_ENABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -729,7 +681,7 @@ static void ftrace_startup(void)
 
 	ftrace_run_update_code(command);
  out:
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_shutdown(void)
@@ -739,9 +691,9 @@ static void ftrace_shutdown(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	ftraced_suspend--;
-	if (!ftraced_suspend)
+	mutex_lock(&ftrace_start_lock);
+	ftrace_start--;
+	if (!ftrace_start)
 		command |= FTRACE_DISABLE_CALLS;
 
 	if (saved_ftrace_func != ftrace_trace_function) {
@@ -754,7 +706,7 @@ static void ftrace_shutdown(void)
 
 	ftrace_run_update_code(command);
  out:
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_startup_sysctl(void)
@@ -764,15 +716,15 @@ static void ftrace_startup_sysctl(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
+	mutex_lock(&ftrace_start_lock);
 	/* Force update next time */
 	saved_ftrace_func = NULL;
-	/* ftraced_suspend is true if we want ftrace running */
-	if (ftraced_suspend)
+	/* ftrace_start is true if we want ftrace running */
+	if (ftrace_start)
 		command |= FTRACE_ENABLE_CALLS;
 
 	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static void ftrace_shutdown_sysctl(void)
@@ -782,20 +734,20 @@ static void ftrace_shutdown_sysctl(void)
 	if (unlikely(ftrace_disabled))
 		return;
 
-	mutex_lock(&ftraced_lock);
-	/* ftraced_suspend is true if ftrace is running */
-	if (ftraced_suspend)
+	mutex_lock(&ftrace_start_lock);
+	/* ftrace_start is true if ftrace is running */
+	if (ftrace_start)
 		command |= FTRACE_DISABLE_CALLS;
 
 	ftrace_run_update_code(command);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 }
 
 static cycle_t		ftrace_update_time;
 static unsigned long	ftrace_update_cnt;
 unsigned long		ftrace_update_tot_cnt;
 
-static int __ftrace_update_code(void *ignore)
+static int ftrace_update_code(void *ignore)
 {
 	int i, save_ftrace_enabled;
 	cycle_t start, stop;
@@ -869,7 +821,6 @@ static int __ftrace_update_code(void *ignore)
 	stop = ftrace_now(raw_smp_processor_id());
 	ftrace_update_time = stop - start;
 	ftrace_update_tot_cnt += ftrace_update_cnt;
-	ftraced_trigger = 0;
 
 	ftrace_enabled = save_ftrace_enabled;
 	ftrace_record_suspend--;
@@ -877,17 +828,6 @@ static int __ftrace_update_code(void *ignore)
 	return 0;
 }
 
-static int ftrace_update_code(void)
-{
-	if (unlikely(ftrace_disabled) ||
-	    !ftrace_enabled || !ftraced_trigger)
-		return 0;
-
-	stop_machine(__ftrace_update_code, NULL, NULL);
-
-	return 1;
-}
-
 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
 {
 	struct ftrace_page *pg;
@@ -1425,10 +1365,10 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable)
 	}
 
 	mutex_lock(&ftrace_sysctl_lock);
-	mutex_lock(&ftraced_lock);
-	if (iter->filtered && ftraced_suspend && ftrace_enabled)
+	mutex_lock(&ftrace_start_lock);
+	if (iter->filtered && ftrace_start && ftrace_enabled)
 		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
-	mutex_unlock(&ftraced_lock);
+	mutex_unlock(&ftrace_start_lock);
 	mutex_unlock(&ftrace_sysctl_lock);
 
 	kfree(iter);
@@ -1448,55 +1388,6 @@ ftrace_notrace_release(struct inode *inode, struct file *file)
 	return ftrace_regex_release(inode, file, 0);
 }
 
-static ssize_t
-ftraced_read(struct file *filp, char __user *ubuf,
-		     size_t cnt, loff_t *ppos)
-{
-	/* don't worry about races */
-	char *buf = ftraced_stop ? "disabled\n" : "enabled\n";
-	int r = strlen(buf);
-
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-ftraced_write(struct file *filp, const char __user *ubuf,
-	      size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	long val;
-	int ret;
-
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	if (strncmp(buf, "enable", 6) == 0)
-		val = 1;
-	else if (strncmp(buf, "disable", 7) == 0)
-		val = 0;
-	else {
-		buf[cnt] = 0;
-
-		ret = strict_strtoul(buf, 10, &val);
-		if (ret < 0)
-			return ret;
-
-		val = !!val;
-	}
-
-	if (val)
-		ftrace_enable_daemon();
-	else
-		ftrace_disable_daemon();
-
-	filp->f_pos += cnt;
-
-	return cnt;
-}
-
 static struct file_operations ftrace_avail_fops = {
 	.open = ftrace_avail_open,
 	.read = seq_read,
@@ -1527,38 +1418,6 @@ static struct file_operations ftrace_notrace_fops = {
 	.release = ftrace_notrace_release,
 };
 
-static struct file_operations ftraced_fops = {
-	.open = tracing_open_generic,
-	.read = ftraced_read,
-	.write = ftraced_write,
-};
-
-/**
- * ftrace_force_update - force an update to all recording ftrace functions
- */
-int ftrace_force_update(void)
-{
-	int ret = 0;
-
-	if (unlikely(ftrace_disabled))
-		return -ENODEV;
-
-	mutex_lock(&ftrace_sysctl_lock);
-	mutex_lock(&ftraced_lock);
-
-	/*
-	 * If ftraced_trigger is not set, then there is nothing
-	 * to update.
-	 */
-	if (ftraced_trigger && !ftrace_update_code())
-		ret = -EBUSY;
-
-	mutex_unlock(&ftraced_lock);
-	mutex_unlock(&ftrace_sysctl_lock);
-
-	return ret;
-}
-
 static __init int ftrace_init_debugfs(void)
 {
 	struct dentry *d_tracer;
@@ -1589,17 +1448,11 @@ static __init int ftrace_init_debugfs(void)
 		pr_warning("Could not create debugfs "
 			   "'set_ftrace_notrace' entry\n");
 
-	entry = debugfs_create_file("ftraced_enabled", 0644, d_tracer,
-				    NULL, &ftraced_fops);
-	if (!entry)
-		pr_warning("Could not create debugfs "
-			   "'ftraced_enabled' entry\n");
 	return 0;
 }
 
 fs_initcall(ftrace_init_debugfs);
 
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
 static int ftrace_convert_nops(unsigned long *start,
 			       unsigned long *end)
 {
@@ -1619,7 +1472,7 @@ static int ftrace_convert_nops(unsigned long *start,
 
 	/* p is ignored */
 	local_irq_save(flags);
-	__ftrace_update_code(p);
+	ftrace_update_code(p);
 	local_irq_restore(flags);
 
 	return 0;
@@ -1666,122 +1519,6 @@ void __init ftrace_init(void)
  failed:
 	ftrace_disabled = 1;
 }
-#else /* CONFIG_FTRACE_MCOUNT_RECORD */
-
-static void ftrace_release_hash(unsigned long start, unsigned long end)
-{
-	struct dyn_ftrace *rec;
-	struct hlist_node *t, *n;
-	struct hlist_head *head, temp_list;
-	unsigned long flags;
-	int i, cpu;
-
-	preempt_disable_notrace();
-
-	/* disable incase we call something that calls mcount */
-	cpu = raw_smp_processor_id();
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
-
-	ftrace_hash_lock(flags);
-
-	for (i = 0; i < FTRACE_HASHSIZE; i++) {
-		INIT_HLIST_HEAD(&temp_list);
-		head = &ftrace_hash[i];
-
-		/* all CPUS are stopped, we are safe to modify code */
-		hlist_for_each_entry_safe(rec, t, n, head, node) {
-			if (rec->flags & FTRACE_FL_FREE)
-				continue;
-
-			if ((rec->ip >= start) && (rec->ip < end))
-				ftrace_free_rec(rec);
-		}
-	}
-
-	ftrace_hash_unlock(flags);
-
-	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
-	preempt_enable_notrace();
-
-}
-
-static int ftraced(void *ignore)
-{
-	unsigned long usecs;
-
-	while (!kthread_should_stop()) {
-
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		/* check once a second */
-		schedule_timeout(HZ);
-
-		if (unlikely(ftrace_disabled))
-			continue;
-
-		mutex_lock(&ftrace_sysctl_lock);
-		mutex_lock(&ftraced_lock);
-		if (!ftraced_suspend && !ftraced_stop &&
-		    ftrace_update_code()) {
-			usecs = nsecs_to_usecs(ftrace_update_time);
-			if (ftrace_update_tot_cnt > 100000) {
-				ftrace_update_tot_cnt = 0;
-				pr_info("hm, dftrace overflow: %lu change%s"
-					" (%lu total) in %lu usec%s\n",
-					ftrace_update_cnt,
-					ftrace_update_cnt != 1 ? "s" : "",
-					ftrace_update_tot_cnt,
-					usecs, usecs != 1 ? "s" : "");
-				FTRACE_WARN_ON_ONCE(1);
-			}
-		}
-		mutex_unlock(&ftraced_lock);
-		mutex_unlock(&ftrace_sysctl_lock);
-
-		ftrace_shutdown_replenish();
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
-}
-
-static int __init ftrace_dynamic_init(void)
-{
-	struct task_struct *p;
-	unsigned long addr;
-	int ret;
-
-	addr = (unsigned long)ftrace_record_ip;
-
-	stop_machine(ftrace_dyn_arch_init, &addr, NULL);
-
-	/* ftrace_dyn_arch_init places the return code in addr */
-	if (addr) {
-		ret = (int)addr;
-		goto failed;
-	}
-
-	ret = ftrace_dyn_table_alloc(NR_TO_INIT);
-	if (ret)
-		goto failed;
-
-	p = kthread_run(ftraced, NULL, "ftraced");
-	if (IS_ERR(p)) {
-		ret = -1;
-		goto failed;
-	}
-
-	last_ftrace_enabled = ftrace_enabled = 1;
-	ftraced_task = p;
-
-	return 0;
-
- failed:
-	ftrace_disabled = 1;
-	return ret;
-}
-
-core_initcall(ftrace_dynamic_init);
-#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
 
 #else
 # define ftrace_startup()		do { } while (0)
@@ -1801,9 +1538,6 @@ void ftrace_kill(void)
 {
 	ftrace_disabled = 1;
 	ftrace_enabled = 0;
-#ifdef CONFIG_DYNAMIC_FTRACE
-	ftraced_suspend = -1;
-#endif
 	clear_ftrace_function();
 }
 