@@ -87,18 +87,6 @@ static int tracing_disabled = 1;
 
 DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
-static inline void ftrace_disable_cpu(void)
-{
-	preempt_disable();
-	__this_cpu_inc(ftrace_cpu_disabled);
-}
-
-static inline void ftrace_enable_cpu(void)
-{
-	__this_cpu_dec(ftrace_cpu_disabled);
-	preempt_enable();
-}
-
 cpumask_var_t __read_mostly tracing_buffer_mask;
 
 /*
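
For context: ftrace_disable_cpu()/ftrace_enable_cpu() only bump the per-CPU ftrace_cpu_disabled counter under preempt_disable(); the write side of the tracer checks that counter before recording an event. A rough, abridged sketch of that consumer as it exists elsewhere in trace.c (not part of this hunk; the body is elided):

/* Abridged sketch of the write-side check that consumes ftrace_cpu_disabled. */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	/* ... reserve, fill and commit the function trace entry ... */
}
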
@@ -748,8 +736,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&ftrace_max_lock);
 
-	ftrace_disable_cpu();
-
 	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
 
 	if (ret == -EBUSY) {
@@ -763,8 +749,6 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 			"Failed to swap buffers due to commit in progress\n");
 	}
 
-	ftrace_enable_cpu();
-
 	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
 
 	__update_max_tr(tr, tsk, cpu);
@@ -916,13 +900,6 @@ out:
 	mutex_unlock(&trace_types_lock);
 }
 
-static void __tracing_reset(struct ring_buffer *buffer, int cpu)
-{
-	ftrace_disable_cpu();
-	ring_buffer_reset_cpu(buffer, cpu);
-	ftrace_enable_cpu();
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -931,7 +908,7 @@ void tracing_reset(struct trace_array *tr, int cpu)
 
 	/* Make sure all commits have finished */
 	synchronize_sched();
-	__tracing_reset(buffer, cpu);
+	ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
@@ -949,7 +926,7 @@ void tracing_reset_online_cpus(struct trace_array *tr)
 	tr->time_start = ftrace_now(tr->cpu);
 
 	for_each_online_cpu(cpu)
-		__tracing_reset(buffer, cpu);
+		ring_buffer_reset_cpu(buffer, cpu);
 
 	ring_buffer_record_enable(buffer);
 }
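
With __tracing_reset() removed, the reset paths above call ring_buffer_reset_cpu() directly between the existing record-disable/record-enable bracket. A minimal sketch of that post-patch pattern; the helper name is hypothetical, and the ring_buffer_record_disable() call is assumed from the surrounding, unshown context of tracing_reset():

/* Hypothetical helper sketching the post-patch reset pattern. */
static void tracing_reset_cpu_sketch(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	ring_buffer_record_disable(buffer);	/* stop new writes (assumed context) */
	synchronize_sched();			/* let in-flight commits finish */
	ring_buffer_reset_cpu(buffer, cpu);	/* no ftrace_disable_cpu() guard needed */
	ring_buffer_record_enable(buffer);	/* resume recording */
}
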
@@ -1733,14 +1710,9 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	iter->idx++;
 	if (iter->buffer_iter[iter->cpu])
 		ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-
-	ftrace_enable_cpu();
 }
 
 static struct trace_entry *
@@ -1750,17 +1722,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
 
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
-
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
 		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
					 lost_events);
 
-	ftrace_enable_cpu();
-
 	if (event) {
 		iter->ent_size = ring_buffer_event_length(event);
 		return ring_buffer_event_data(event);
@@ -1850,11 +1817,8 @@ void *trace_find_next_entry_inc(struct trace_iterator *iter)
 
 static void trace_consume(struct trace_iterator *iter)
 {
-	/* Don't allow ftrace to trace into the ring buffers */
-	ftrace_disable_cpu();
 	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
 			    &iter->lost_events);
-	ftrace_enable_cpu();
 }
 
 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
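
The read side likewise calls the ring buffer API directly now. A self-contained sketch of the consuming-read pattern used by trace_consume(); the helper name drain_cpu_sketch is made up for illustration, while the ring_buffer_* calls match the ones visible in the hunks above:

/* Hypothetical helper: drain one CPU's events with consuming reads. */
static int drain_cpu_sketch(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;
	int count = 0;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		struct trace_entry *entry = ring_buffer_event_data(event);

		(void)entry;	/* a real reader would print or copy the entry */
		count++;
	}
	return count;
}
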
@@ -1943,16 +1907,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 		iter->cpu = 0;
 		iter->idx = -1;
 
-		ftrace_disable_cpu();
-
 		if (cpu_file == TRACE_PIPE_ALL_CPU) {
 			for_each_tracing_cpu(cpu)
 				tracing_iter_reset(iter, cpu);
 		} else
 			tracing_iter_reset(iter, cpu_file);
 
-		ftrace_enable_cpu();
-
 		iter->leftover = 0;
 		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
 			;