@@ -660,6 +660,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
 	arch_spin_lock(&ftrace_max_lock);
 
 	tr->buffer = max_tr.buffer;
@@ -686,6 +690,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 		return;
 
 	WARN_ON_ONCE(!irqs_disabled());
+	if (!current_trace->use_max_tr) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	arch_spin_lock(&ftrace_max_lock);
 
 	ftrace_disable_cpu();
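
Both hunks above add the same guard: update_max_tr() and update_max_tr_single() now bail out, with a one-time warning, when the active tracer never declared use_max_tr, since in that case max_tr holds only a single-entry placeholder. A minimal user-space sketch of the guard pattern follows (all types and names are illustrative stand-ins, not the kernel API):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for struct tracer; only the flag matters here. */
struct tracer {
	const char *name;
	bool use_max_tr;	/* does this tracer take max-latency snapshots? */
};

static struct tracer *current_trace;

/* Mirrors the guard added to update_max_tr(): warn and return early
 * when the active tracer never asked for a max buffer. */
static void update_max(void)
{
	if (!current_trace->use_max_tr) {
		fprintf(stderr, "WARN: update_max() without use_max_tr\n");
		return;
	}
	printf("%s: swapping main and max buffers\n", current_trace->name);
}

int main(void)
{
	struct tracer nop = { "nop", false };
	struct tracer irqsoff = { "irqsoff", true };

	current_trace = &nop;
	update_max();		/* warns, nothing swapped */
	current_trace = &irqsoff;
	update_max();		/* proceeds */
	return 0;
}
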
@@ -2801,6 +2810,9 @@ static int tracing_resize_ring_buffer(unsigned long size)
 	if (ret < 0)
 		return ret;
 
+	if (!current_trace->use_max_tr)
+		goto out;
+
 	ret = ring_buffer_resize(max_tr.buffer, size);
 	if (ret < 0) {
 		int r;
@@ -2828,11 +2840,14 @@ static int tracing_resize_ring_buffer(unsigned long size)
 		return ret;
 	}
 
+	max_tr.entries = size;
+ out:
 	global_trace.entries = size;
 
 	return ret;
 }
 
+
 /**
  * tracing_update_buffers - used by tracing facility to expand ring buffers
  *
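
The two tracing_resize_ring_buffer() hunks make the max buffer opt-in on resize as well: when no latency tracer is active, the function jumps over the max_tr resize to the new out: label and only updates global_trace.entries. A hedged sketch of that skip-with-label flow (buf_resize() is an invented stand-in for ring_buffer_resize(); the real code also rolls the main buffer back on failure):

#include <stdio.h>
#include <stdbool.h>

static bool use_max_tr;			/* set while a latency tracer is active */
static long global_entries = 1, max_entries = 1;

/* Pretend resize standing in for ring_buffer_resize(). */
static int buf_resize(long size)
{
	return size > 0 ? 0 : -1;
}

static int resize_ring_buffers(long size)
{
	int ret = buf_resize(size);	/* main buffer */

	if (ret < 0)
		return ret;

	if (!use_max_tr)
		goto out;	/* no latency tracer: leave max at 1 entry */

	ret = buf_resize(size);	/* max buffer */
	if (ret < 0)
		return ret;
	max_entries = size;
 out:
	global_entries = size;
	return ret;
}

int main(void)
{
	resize_ring_buffers(4096);
	printf("global=%ld max=%ld\n", global_entries, max_entries);

	use_max_tr = true;
	resize_ring_buffers(8192);
	printf("global=%ld max=%ld\n", global_entries, max_entries);
	return 0;
}
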
@@ -2893,12 +2908,26 @@ static int tracing_set_tracer(const char *buf)
 	trace_branch_disable();
 	if (current_trace && current_trace->reset)
 		current_trace->reset(tr);
-
+	if (current_trace && current_trace->use_max_tr) {
+		/*
+		 * We don't free the ring buffer; instead, we resize it
+		 * because the max_tr ring buffer has some state (e.g.
+		 * ring->clock) and we want to preserve it.
+		 */
+		ring_buffer_resize(max_tr.buffer, 1);
+		max_tr.entries = 1;
+	}
 	destroy_trace_option_files(topts);
 
 	current_trace = t;
 
 	topts = create_trace_option_files(current_trace);
+	if (current_trace->use_max_tr) {
+		ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
+		if (ret < 0)
+			goto out;
+		max_tr.entries = global_trace.entries;
+	}
 
 	if (t->init) {
 		ret = tracer_init(t, tr);
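
tracing_set_tracer() now drives the whole lifecycle: shrink max_tr to one entry when leaving a tracer that used it, and grow it back to global_trace.entries before switching to one that does. Resizing rather than freeing keeps the buffer object and its state (such as the selected trace clock) intact. A rough model of that shrink/grow cycle, with assumed names:

#include <stdio.h>

/* Hypothetical buffer handle; the point is that a resize keeps the
 * object (and per-buffer state such as its clock) alive. */
struct ring {
	long entries;
	int clock;	/* example of state a free/realloc would lose */
};

static struct ring max_tr = { 1, 0 };
static long global_entries = 4096;

static void leave_latency_tracer(void)
{
	/* Shrink instead of free: one entry keeps state, costs little. */
	max_tr.entries = 1;
}

static void enter_latency_tracer(void)
{
	/* Grow back to match the main buffer before tracing starts. */
	max_tr.entries = global_entries;
}

int main(void)
{
	max_tr.clock = 2;	/* user picked a non-default clock */
	leave_latency_tracer();
	enter_latency_tracer();
	printf("entries=%ld clock=%d\n", max_tr.entries, max_tr.clock);
	return 0;
}
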
@@ -3480,7 +3509,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	}
 
 	tracing_start();
-	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 
 	return cnt;
@@ -4578,16 +4606,14 @@ __init static int tracer_alloc_buffers(void)
 
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-	max_tr.buffer = ring_buffer_alloc(ring_buf_size,
-					    TRACE_BUFFER_FLAGS);
+	max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS);
 	if (!max_tr.buffer) {
 		printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
 		WARN_ON(1);
 		ring_buffer_free(global_trace.buffer);
 		goto out_free_cpumask;
 	}
-	max_tr.entries = ring_buffer_size(max_tr.buffer);
-	WARN_ON(max_tr.entries != global_trace.entries);
+	max_tr.entries = 1;
 #endif
 
 	/* Allocate the first page for all buffers */
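
Finally, boot-time allocation shrinks to match: max_tr starts as a one-entry placeholder instead of mirroring the full trace buffer, so its memory is only paid once a latency tracer is actually selected. A compressed illustration of allocate-small-then-grow-on-demand (buf_alloc() is a hypothetical stand-in for ring_buffer_alloc(), and the boot-time size is an assumed value):

#include <stdio.h>
#include <stdlib.h>

struct buf { long entries; };

/* Illustrative allocator standing in for ring_buffer_alloc(). */
static struct buf *buf_alloc(long entries)
{
	struct buf *b = malloc(sizeof(*b));

	if (b)
		b->entries = entries;
	return b;
}

int main(void)
{
	long ring_buf_size = 1 << 20;	/* assumed boot-time size */
	struct buf *global = buf_alloc(ring_buf_size);
	struct buf *max = buf_alloc(1);	/* placeholder until needed */

	if (!global || !max)
		return 1;
	printf("global=%ld max=%ld\n", global->entries, max->entries);
	free(global);
	free(max);
	return 0;
}
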