@@ -2676,7 +2676,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 {
 	unsigned long val;
 	char buf[64];
-	int ret;
+	int ret, cpu;
 	struct trace_array *tr = filp->private_data;
 
 	if (cnt >= sizeof(buf))
@@ -2704,6 +2704,14 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 		goto out;
 	}
 
+	/* disable all cpu buffers */
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_inc(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_inc(&max_tr.data[cpu]->disabled);
+	}
+
 	if (val != global_trace.entries) {
 		ret = ring_buffer_resize(global_trace.buffer, val);
 		if (ret < 0) {
@@ -2735,6 +2743,13 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	if (tracing_disabled)
 		cnt = -ENOMEM;
 out:
+	for_each_tracing_cpu(cpu) {
+		if (global_trace.data[cpu])
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		if (max_tr.data[cpu])
+			atomic_dec(&max_tr.data[cpu]->disabled);
+	}
+
 	max_tr.entries = global_trace.entries;
 	mutex_unlock(&trace_types_lock);
 