@@ -341,7 +341,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
 /* trace_flags holds trace_options default values */
 unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
-	TRACE_ITER_GRAPH_TIME;
+	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD;
 
 static int trace_stop_count;
 static DEFINE_SPINLOCK(tracing_start_lock);
@@ -425,6 +425,7 @@ static const char *trace_options[] = {
 	"latency-format",
 	"sleep-time",
 	"graph-time",
+	"record-cmd",
 	NULL
 };
 
@@ -656,6 +657,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
 	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
@@ -682,6 +687,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
@@ -726,7 +736,7 @@ __acquires(kernel_lock)
 		return -1;
 	}
 
-	if (strlen(type->name) > MAX_TRACER_SIZE) {
+	if (strlen(type->name) >= MAX_TRACER_SIZE) {
 		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
 		return -1;
 	}
@@ -1328,61 +1338,6 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags)
 
 #endif /* CONFIG_STACKTRACE */
 
-static void
-ftrace_trace_special(void *__tr,
-		     unsigned long arg1, unsigned long arg2, unsigned long arg3,
-		     int pc)
-{
-	struct ftrace_event_call *call = &event_special;
-	struct ring_buffer_event *event;
-	struct trace_array *tr = __tr;
-	struct ring_buffer *buffer = tr->buffer;
-	struct special_entry *entry;
-
-	event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL,
-					  sizeof(*entry), 0, pc);
-	if (!event)
-		return;
-	entry = ring_buffer_event_data(event);
-	entry->arg1 = arg1;
-	entry->arg2 = arg2;
-	entry->arg3 = arg3;
-
-	if (!filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, pc);
-}
-
-void
-__trace_special(void *__tr, void *__data,
-		unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count());
-}
-
-void
-ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
-{
-	struct trace_array *tr = &global_trace;
-	struct trace_array_cpu *data;
-	unsigned long flags;
-	int cpu;
-	int pc;
-
-	if (tracing_disabled)
-		return;
-
-	pc = preempt_count();
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
-
-	if (likely(atomic_inc_return(&data->disabled) == 1))
-		ftrace_trace_special(tr, arg1, arg2, arg3, pc);
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
-}
-
 /**
  * trace_vbprintk - write binary msg to tracing buffer
  *
@@ -1401,7 +1356,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	struct bprint_entry *entry;
 	unsigned long flags;
 	int disable;
-	int resched;
 	int cpu, len = 0, size, pc;
 
 	if (unlikely(tracing_selftest_running || tracing_disabled))
@@ -1411,7 +1365,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 	pause_graph_tracing();
 
 	pc = preempt_count();
-	resched = ftrace_preempt_disable();
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 
@@ -1449,7 +1403,7 @@ out_unlock:
 
 out:
 	atomic_dec_return(&data->disabled);
-	ftrace_preempt_enable(resched);
+	preempt_enable_notrace();
 	unpause_graph_tracing();
 
 	return len;
@@ -2386,6 +2340,7 @@ static const struct file_operations show_traces_fops = {
 	.open		= show_traces_open,
 	.read		= seq_read,
 	.release	= seq_release,
+	.llseek		= seq_lseek,
 };
 
 /*
@@ -2479,6 +2434,7 @@ static const struct file_operations tracing_cpumask_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_cpumask_read,
 	.write		= tracing_cpumask_write,
+	.llseek		= generic_file_llseek,
 };
 
 static int tracing_trace_options_show(struct seq_file *m, void *v)
@@ -2554,6 +2510,9 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 		trace_flags |= mask;
 	else
 		trace_flags &= ~mask;
+
+	if (mask == TRACE_ITER_RECORD_CMD)
+		trace_event_enable_cmd_record(enabled);
 }
 
 static ssize_t
@@ -2645,6 +2604,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_readme_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_readme_read,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -2695,6 +2655,7 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
 static const struct file_operations tracing_saved_cmdlines_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_saved_cmdlines_read,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -2790,6 +2751,9 @@ static int tracing_resize_ring_buffer(unsigned long size)
 	if (ret < 0)
 		return ret;
 
+	if (!current_trace->use_max_tr)
+		goto out;
+
 	ret = ring_buffer_resize(max_tr.buffer, size);
 	if (ret < 0) {
 		int r;
@@ -2817,11 +2781,14 @@ static int tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
+	max_tr.entries = size;
+ out:
 	global_trace.entries = size;
 
 	return ret;
 }
 
+
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
  *
@@ -2882,12 +2849,26 @@ static int tracing_set_tracer(const char *buf)
 	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
-
+	if (current_trace && current_trace->use_max_tr) {
+		/*
+		 * We don't free the ring buffer. instead, resize it because
+		 * The max_tr ring buffer has some state (e.g. ring->clock) and
+		 * we want preserve it.
+		 */
+		ring_buffer_resize(max_tr.buffer, 1);
+		max_tr.entries = 1;
+	}
 	destroy_trace_option_files(topts);
 
 	current_trace = t;
 
 	topts = create_trace_option_files(current_trace);
+	if (current_trace->use_max_tr) {
+		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
+		if (ret < 0)
+			goto out;
+		max_tr.entries = global_trace.entries;
+	}
 
 	if (t->init) {
 		ret = tracer_init(t, tr);
@@ -3024,6 +3005,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 	if (iter->trace->pipe_open)
 		iter->trace->pipe_open(iter);
 
+	nonseekable_open(inode, filp);
 out:
 	mutex_unlock(&trace_types_lock);
 	return ret;
@@ -3469,7 +3451,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	}
 
 	tracing_start();
-	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
 	return cnt;
@@ -3582,18 +3563,21 @@ static const struct file_operations tracing_max_lat_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_max_lat_read,
 	.write		= tracing_max_lat_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_ctrl_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_ctrl_read,
 	.write		= tracing_ctrl_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations set_tracer_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_set_trace_read,
 	.write		= tracing_set_trace_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_pipe_fops = {
@@ -3602,17 +3586,20 @@ static const struct file_operations tracing_pipe_fops = {
 	.read		= tracing_read_pipe,
 	.splice_read	= tracing_splice_read_pipe,
 	.release	= tracing_release_pipe,
+	.llseek		= no_llseek,
 };
 
 static const struct file_operations tracing_entries_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_entries_read,
 	.write		= tracing_entries_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations tracing_mark_fops = {
 	.open		= tracing_open_generic,
 	.write		= tracing_mark_write,
+	.llseek		= generic_file_llseek,
 };
 
 static const struct file_operations trace_clock_fops = {
@@ -3918,6 +3905,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_stats_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_stats_read,
+	.llseek		= generic_file_llseek,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -3954,6 +3942,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf,
 static const struct file_operations tracing_dyn_info_fops = {
 	.open		= tracing_open_generic,
 	.read		= tracing_read_dyn_info,
+	.llseek		= generic_file_llseek,
 };
 #endif
 
@@ -4107,6 +4096,7 @@ static const struct file_operations trace_options_fops = {
 	.open		= tracing_open_generic,
 	.read		= trace_options_read,
 	.write		= trace_options_write,
+	.llseek		= generic_file_llseek,
 };
 
 static ssize_t
@@ -4158,6 +4148,7 @@ static const struct file_operations trace_options_core_fops = {
 	.open		= tracing_open_generic,
 	.read		= trace_options_core_read,
 	.write		= trace_options_core_write,
+	.llseek		= generic_file_llseek,
 };
 
 struct dentry *trace_create_file(const char *name,
@@ -4347,9 +4338,6 @@ static __init int tracer_init_debugfs(void)
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
 			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
 #endif
-#ifdef CONFIG_SYSPROF_TRACER
-	init_tracer_sysprof_debugfs(d_tracer);
-#endif
 
 	create_trace_options_dir();
 
@@ -4576,16 +4564,14 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
-					    TRACE_BUFFER_FLAGS);
+	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = ring_buffer_size(max_tr.buffer);
-	WARN_ON(max_tr.entries != global_trace.entries);
+	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
@@ -4598,9 +4584,6 @@ __init static int tracer_alloc_buffers(void)
 
 	register_tracer(&nop_trace);
 	current_trace = &nop_trace;
-#ifdef CONFIG_BOOT_TRACER
-	register_tracer(&boot_tracer);
-#endif
 	/* All seems OK, enable tracing */
 	tracing_disabled = 0;
 