@@ -5997,36 +5997,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
 	iter->trace_buffer = &global_trace.trace_buffer;
 }
 
-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
 {
-	static arch_spinlock_t ftrace_dump_lock =
-		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
+	static atomic_t dump_running;
 	unsigned int old_userobj;
-	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
 
-	/* only one dump */
-	local_irq_save(flags);
-	arch_spin_lock(&ftrace_dump_lock);
-	if (dump_ran)
-		goto out;
-
-	dump_ran = 1;
+	/* Only allow one dump user at a time. */
+	if (atomic_inc_return(&dump_running) != 1) {
+		atomic_dec(&dump_running);
+		return;
+	}
 
+	/*
+	 * Always turn off tracing when we dump.
+	 * We don't need to show trace output of what happens
+	 * between multiple crashes.
+	 *
+	 * If the user does a sysrq-z, then they can re-enable
+	 * tracing with echo 1 > tracing_on.
+	 */
 	tracing_off();
 
-	/* Did function tracer already get disabled? */
-	if (ftrace_is_dead()) {
-		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
-		printk("# MAY BE MISSING FUNCTION EVENTS\n");
-	}
-
-	if (disable_tracing)
-		ftrace_kill();
+	local_irq_save(flags);
 
 	/* Simulate the iterator */
 	trace_init_global_iter(&iter);
@@ -6056,6 +6052,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
 
 	printk(KERN_TRACE "Dumping ftrace buffer:\n");
 
+	/* Did function tracer already get disabled? */
+	if (ftrace_is_dead()) {
+		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+		printk("# MAY BE MISSING FUNCTION EVENTS\n");
+	}
+
 	/*
 	 * We need to stop all tracing on all CPUS to read the
 	 * the next buffer. This is a bit expensive, but is
@@ -6095,26 +6097,14 @@
 	printk(KERN_TRACE "---------------------------------\n");
 
  out_enable:
-	/* Re-enable tracing if requested */
-	if (!disable_tracing) {
-		trace_flags |= old_userobj;
+	trace_flags |= old_userobj;
 
-		for_each_tracing_cpu(cpu) {
-			atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
-		}
-		tracing_on();
+	for_each_tracing_cpu(cpu) {
+		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
 	}
-
- out:
-	arch_spin_unlock(&ftrace_dump_lock);
+	atomic_dec(&dump_running);
 	local_irq_restore(flags);
 }
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
-	__ftrace_dump(true, oops_dump_mode);
-}
 EXPORT_SYMBOL_GPL(ftrace_dump);
 
 __init static int tracer_alloc_buffers(void)
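
The guard this patch adopts works because atomic_inc_return() increments the counter and returns the new value in one atomic step: exactly one caller sees 1 and proceeds, while any concurrent caller sees a larger value, undoes its increment, and returns. Below is a minimal user-space sketch of the same pattern, using C11 atomics in place of the kernel's atomic_t API; the dump() function and its body are illustrative stand-ins, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_running;

/* Illustrative stand-in for ftrace_dump(): of any concurrent
 * callers, only the first performs the dump; the rest back out. */
static void dump(void)
{
	/* C11 atomic_fetch_add returns the OLD value, so the winner
	 * sees 0 here; the kernel's atomic_inc_return returns the
	 * NEW value, so the winner there sees 1. Same guard. */
	if (atomic_fetch_add(&dump_running, 1) != 0) {
		atomic_fetch_sub(&dump_running, 1);
		return;	/* another dump is already in progress */
	}

	printf("dumping buffer...\n");

	/* Drop the guard so a later dump can run again. */
	atomic_fetch_sub(&dump_running, 1);
}

Unlike the old ftrace_dump_lock scheme, a losing caller returns immediately instead of spinning, and because the counter is decremented on the way out the guard resets to zero, so a later dump can run again rather than being blocked forever by the one-shot dump_ran flag.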