@@ -101,10 +101,7 @@ static inline void ftrace_enable_cpu(void)
 	preempt_enable();
 }
 
-static cpumask_var_t __read_mostly	tracing_buffer_mask;
-
-#define for_each_tracing_cpu(cpu)	\
-	for_each_cpu(cpu, tracing_buffer_mask)
+cpumask_var_t __read_mostly	tracing_buffer_mask;
 
 /*
  * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@ -1539,11 +1536,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 }
 EXPORT_SYMBOL_GPL(trace_vprintk);
 
-enum trace_file_type {
-	TRACE_FILE_LAT_FMT	= 1,
-	TRACE_FILE_ANNOTATE	= 2,
-};
-
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
@@ -1641,7 +1633,7 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 }
 
 /* Find the next real entry, and increment the iterator to the next entry */
-static void *find_next_entry_inc(struct trace_iterator *iter)
+void *trace_find_next_entry_inc(struct trace_iterator *iter)
 {
 	iter->ent = __find_next_entry(iter, &iter->cpu,
 				      &iter->lost_events, &iter->ts);
@@ -1676,19 +1668,19 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 		return NULL;
 
 	if (iter->idx < 0)
-		ent = find_next_entry_inc(iter);
+		ent = trace_find_next_entry_inc(iter);
 	else
 		ent = iter;
 
 	while (ent && iter->idx < i)
-		ent = find_next_entry_inc(iter);
+		ent = trace_find_next_entry_inc(iter);
 
 	iter->pos = *pos;
 
 	return ent;
 }
 
-static void tracing_iter_reset(struct trace_iterator *iter, int cpu)
+void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
 	struct trace_array *tr = iter->tr;
 	struct ring_buffer_event *event;
@@ -2049,7 +2041,7 @@ int trace_empty(struct trace_iterator *iter)
 }
 
 /*  Called with trace_event_read_lock() held. */
-static enum print_line_t print_trace_line(struct trace_iterator *iter)
+enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
 
@@ -3211,7 +3203,7 @@ waitagain:
 
 	trace_event_read_lock();
 	trace_access_lock(iter->cpu_file);
-	while (find_next_entry_inc(iter) != NULL) {
+	while (trace_find_next_entry_inc(iter) != NULL) {
 		enum print_line_t ret;
 		int len = iter->seq.len;
 
@@ -3294,7 +3286,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
 		if (ret != TRACE_TYPE_NO_CONSUME)
 			trace_consume(iter);
 		rem -= count;
-		if (!find_next_entry_inc(iter)) {
+		if (!trace_find_next_entry_inc(iter)) {
 			rem = 0;
 			iter->ent = NULL;
 			break;
@@ -3350,7 +3342,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	if (ret <= 0)
 		goto out_err;
 
-	if (!iter->ent && !find_next_entry_inc(iter)) {
+	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
 		ret = -EFAULT;
 		goto out_err;
 	}
@@ -4414,7 +4406,7 @@ static struct notifier_block trace_die_notifier = {
  */
 #define KERN_TRACE		KERN_EMERG
 
-static void
+void
 trace_printk_seq(struct trace_seq *s)
 {
 	/* Probably should print a warning here. */
@@ -4429,6 +4421,13 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
+void trace_init_global_iter(struct trace_iterator *iter)
+{
+	iter->tr = &global_trace;
+	iter->trace = current_trace;
+	iter->cpu_file = TRACE_PIPE_ALL_CPU;
+}
+
 static void
 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 {
@@ -4454,8 +4453,10 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	if (disable_tracing)
 		ftrace_kill();
 
+	trace_init_global_iter(&iter);
+
 	for_each_tracing_cpu(cpu) {
-		atomic_inc(&global_trace.data[cpu]->disabled);
+		atomic_inc(&iter.tr->data[cpu]->disabled);
 	}
 
 	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -4504,7 +4505,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	iter.iter_flags |= TRACE_FILE_LAT_FMT;
 	iter.pos = -1;
 
-	if (find_next_entry_inc(&iter) != NULL) {
+	if (trace_find_next_entry_inc(&iter) != NULL) {
 		int ret;
 
 		ret = print_trace_line(&iter);
@@ -4526,7 +4527,7 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 	trace_flags |= old_userobj;
 
 	for_each_tracing_cpu(cpu) {
-		atomic_dec(&global_trace.data[cpu]->disabled);
+		atomic_dec(&iter.tr->data[cpu]->disabled);
 	}
 	tracing_on();
 }
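
For context, a minimal sketch of how an out-of-file consumer (such as the kdb dumper this export enables) might drive the iterator API made non-static above, mirroring the flow of __ftrace_dump(). The function name example_dump is hypothetical, and the per-cpu "disabled" bookkeeping and locking are deliberately elided; this illustrates the intended call pattern, not code from the patch:

/*
 * Illustrative only: walk every per-cpu ring buffer using the
 * newly exported iterator helpers (names as exported above).
 */
static void example_dump(void)
{
	static struct trace_iterator iter;
	int cpu;

	trace_init_global_iter(&iter);		/* bind iter to global_trace */

	for_each_tracing_cpu(cpu)
		tracing_iter_reset(&iter, cpu);	/* rewind each per-cpu buffer */

	while (trace_find_next_entry_inc(&iter)) {
		print_trace_line(&iter);	/* format one entry into iter.seq */
		trace_printk_seq(&iter.seq);	/* emit it and re-init the seq */
	}
}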