@@ -374,6 +374,21 @@ static int __init set_buf_size(char *str)
 }
 __setup("trace_buf_size=", set_buf_size);
 
+static int __init set_tracing_thresh(char *str)
+{
+	unsigned long threshold;
+	int ret;
+
+	if (!str)
+		return 0;
+	ret = strict_strtoul(str, 0, &threshold);
+	if (ret < 0)
+		return 0;
+	tracing_thresh = threshold * 1000;
+	return 1;
+}
+__setup("tracing_thresh=", set_tracing_thresh);
+
 unsigned long nsecs_to_usecs(unsigned long nsecs)
 {
 	return nsecs / 1000;
@@ -579,9 +594,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 static arch_spinlock_t ftrace_max_lock =
 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
+unsigned long __read_mostly	tracing_thresh;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly	tracing_max_latency;
-unsigned long __read_mostly	tracing_thresh;
 
 /*
  * Copy the new maximum trace into the separate maximum-trace
@@ -592,7 +608,7 @@ static void
 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
 	struct trace_array_cpu *data = tr->data[cpu];
-	struct trace_array_cpu *max_data = tr->data[cpu];
+	struct trace_array_cpu *max_data;
 
 	max_tr.cpu = cpu;
 	max_tr.time_start = data->preempt_timestamp;
@@ -602,7 +618,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 	max_data->critical_start = data->critical_start;
 	max_data->critical_end = data->critical_end;
 
-	memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
 	max_data->pid = tsk->pid;
 	max_data->uid = task_uid(tsk);
 	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
@@ -824,10 +840,10 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct trace_array *tr, int cpu)
+static void __tracing_reset(struct ring_buffer *buffer, int cpu)
 {
 	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(tr->buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 	ftrace_enable_cpu();
 }
 
@@ -839,7 +855,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(tr, cpu);
+	__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -857,7 +873,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(tr, cpu);
+		__tracing_reset(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -934,6 +950,8 @@ void tracing_start(void)
 		goto out;
 	}
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
 
 	buffer = global_trace.buffer;
 	if (buffer)
@@ -943,6 +961,8 @@ void tracing_start(void)
 	if (buffer)
 		ring_buffer_record_enable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
 	ftrace_start();
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
@@ -964,6 +984,9 @@ void tracing_stop(void)
 	if (trace_stop_count++)
 		goto out;
 
+	/* Prevent the buffers from switching */
+	arch_spin_lock(&ftrace_max_lock);
+
 	buffer = global_trace.buffer;
 	if (buffer)
 		ring_buffer_record_disable(buffer);
@@ -972,6 +995,8 @@ void tracing_stop(void)
 	if (buffer)
 		ring_buffer_record_disable(buffer);
 
+	arch_spin_unlock(&ftrace_max_lock);
+
  out:
 	spin_unlock_irqrestore(&tracing_start_lock, flags);
 }
@@ -1259,6 +1284,13 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
 		return;
 
+	/*
+	 * NMIs cannot handle page faults, even with fixups.
+	 * Saving the user stack can (and often does) fault.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -1703,6 +1735,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 
 		ftrace_enable_cpu();
 
+		iter->leftover = 0;
 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 			;
 
@@ -4248,10 +4281,10 @@ static __init int tracer_init_debugfs(void)
 #ifdef CONFIG_TRACER_MAX_TRACE
 	trace_create_file("tracing_max_latency", 0644, d_tracer,
 			&tracing_max_latency, &tracing_max_lat_fops);
+#endif
 
 	trace_create_file("tracing_thresh", 0644, d_tracer,
 			&tracing_thresh, &tracing_max_lat_fops);
-#endif
 
 	trace_create_file("README", 0444, d_tracer,
 			NULL, &tracing_readme_fops);