@@ -1294,20 +1294,32 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	return ent;
 }
 
+/*
+ * No locking is necessary here. The worst thing that can
+ * happen is losing events consumed at the same time
+ * by a trace_pipe reader.
+ * Other than that, we don't risk crashing the ring buffer
+ * because it serializes the readers.
+ *
+ * The current tracer is copied to avoid using a global lock
+ * all around.
+ */
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
 	struct trace_iterator *iter = m->private;
+	static struct tracer *old_tracer;
 	int cpu_file = iter->cpu_file;
 	void *p = NULL;
 	loff_t l = 0;
 	int cpu;
 
+	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
-
-	if (!current_trace || current_trace != iter->trace) {
-		mutex_unlock(&trace_types_lock);
-		return NULL;
+	if (unlikely(old_tracer != current_trace && current_trace)) {
+		old_tracer = current_trace;
+		*iter->trace = *current_trace;
 	}
+	mutex_unlock(&trace_types_lock);
 
 	atomic_inc(&trace_record_cmdline_disabled);
 
@@ -1341,7 +1353,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 static void s_stop(struct seq_file *m, void *p)
 {
 	atomic_dec(&trace_record_cmdline_disabled);
-	mutex_unlock(&trace_types_lock);
 }
 
 static void print_lat_help_header(struct seq_file *m)
@@ -1691,13 +1702,25 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 		goto out;
 	}
 
+	/*
+	 * We make a copy of the current tracer to avoid concurrent
+	 * changes to it while we are reading.
+	 */
 	mutex_lock(&trace_types_lock);
+	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
+	if (!iter->trace) {
+		*ret = -ENOMEM;
+		goto fail;
+	}
+	if (current_trace)
+		*iter->trace = *current_trace;
+
 	if (current_trace && current_trace->print_max)
 		iter->tr = &max_tr;
 	else
 		iter->tr = &global_trace;
-	iter->trace = current_trace;
 	iter->pos = -1;
+	mutex_init(&iter->mutex);
 	iter->cpu_file = cpu_file;
 
 	/* Notify the tracer early; before we stop tracing. */
@@ -1747,8 +1770,9 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
 		if (iter->buffer_iter[cpu])
 			ring_buffer_read_finish(iter->buffer_iter[cpu]);
 	}
-fail:
+ fail:
 	mutex_unlock(&trace_types_lock);
+	kfree(iter->trace);
 	kfree(iter);
 
 	return ERR_PTR(-ENOMEM);
@@ -1783,6 +1807,8 @@ static int tracing_release(struct inode *inode, struct file *file)
 	mutex_unlock(&trace_types_lock);
 
 	seq_release(inode, file);
+	mutex_destroy(&iter->mutex);
+	kfree(iter->trace);
 	kfree(iter);
 	return 0;
 }
@@ -2392,10 +2418,21 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 		goto out;
 	}
 
+	/*
+	 * We make a copy of the current tracer to avoid concurrent
+	 * changes to it while we are reading.
+	 */
+	iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
+	if (!iter->trace) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	if (current_trace)
+		*iter->trace = *current_trace;
+
 	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
-		kfree(iter);
 		ret = -ENOMEM;
-		goto out;
+		goto fail;
 	}
 
 	/* trace pipe does not show start of buffer */
@@ -2403,7 +2440,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 
 	iter->cpu_file = cpu_file;
 	iter->tr = &global_trace;
-	iter->trace = current_trace;
+	mutex_init(&iter->mutex);
 	filp->private_data = iter;
 
 	if (iter->trace->pipe_open)
@@ -2412,6 +2449,12 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 out:
 	mutex_unlock(&trace_types_lock);
 	return ret;
+
+fail:
+	kfree(iter->trace);
+	kfree(iter);
+	mutex_unlock(&trace_types_lock);
+	return ret;
 }
 
 static int tracing_release_pipe(struct inode *inode, struct file *file)
@@ -2428,6 +2471,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 	mutex_unlock(&trace_types_lock);
 
 	free_cpumask_var(iter->started);
+	mutex_destroy(&iter->mutex);
+	kfree(iter->trace);
 	kfree(iter);
 
 	return 0;
@@ -2497,18 +2542,15 @@ static int tracing_wait_pipe(struct file *filp)
 			return -EAGAIN;
 		}
 
-		mutex_unlock(&trace_types_lock);
+		mutex_unlock(&iter->mutex);
 
 		iter->trace->wait_pipe(iter);
 
-		mutex_lock(&trace_types_lock);
+		mutex_lock(&iter->mutex);
 
 		if (signal_pending(current))
 			return -EINTR;
 
-		if (iter->trace != current_trace)
-			return 0;
-
 		/*
 		 * We block until we read something and tracing is disabled.
 		 * We still block if tracing is disabled, but we have never
@@ -2533,6 +2575,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 		  size_t cnt, loff_t *ppos)
 {
 	struct trace_iterator *iter = filp->private_data;
+	static struct tracer *old_tracer;
 	ssize_t sret;
 
 	/* return any leftover data */
@@ -2542,7 +2585,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
 
 	trace_seq_reset(&iter->seq);
 
+	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
+	if (unlikely(old_tracer != current_trace && current_trace)) {
+		old_tracer = current_trace;
+		*iter->trace = *current_trace;
+	}
+	mutex_unlock(&trace_types_lock);
+
+	/*
+	 * Avoid more than one consumer on a single file descriptor.
+	 * This is just a matter of trace coherency; the ring buffer itself
+	 * is protected.
+	 */
+	mutex_lock(&iter->mutex);
 	if (iter->trace->read) {
 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
 		if (sret)
@@ -2599,7 +2655,7 @@ waitagain:
 		goto waitagain;
 
 out:
-	mutex_unlock(&trace_types_lock);
+	mutex_unlock(&iter->mutex);
 
 	return sret;
 }
@@ -2676,11 +2732,20 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		.ops		= &tracing_pipe_buf_ops,
 		.spd_release	= tracing_spd_release_pipe,
 	};
+	static struct tracer *old_tracer;
 	ssize_t ret;
 	size_t rem;
 	unsigned int i;
 
+	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
+	if (unlikely(old_tracer != current_trace && current_trace)) {
+		old_tracer = current_trace;
+		*iter->trace = *current_trace;
+	}
+	mutex_unlock(&trace_types_lock);
+
+	mutex_lock(&iter->mutex);
 
 	if (iter->trace->splice_read) {
 		ret = iter->trace->splice_read(iter, filp,
@@ -2720,14 +2785,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		trace_seq_reset(&iter->seq);
 	}
 
-	mutex_unlock(&trace_types_lock);
+	mutex_unlock(&iter->mutex);
 
 	spd.nr_pages = i;
 
 	return splice_to_pipe(pipe, &spd);
 
 out_err:
-	mutex_unlock(&trace_types_lock);
+	mutex_unlock(&iter->mutex);
 
 	return ret;
 }
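
The scheme applied above in s_start(), tracing_read_pipe() and tracing_splice_read_pipe() is always the same: hold trace_types_lock only long enough to copy current_trace into the iterator, then serialize the readers of a given file descriptor with the per-iterator iter->mutex, so the global lock is never held across blocking reads. The sketch below is a minimal userspace illustration of that pattern, not kernel code: every name in it (reader_iter, reader_consume, tracer_types_lock and so on) is invented for this example, pthread mutexes stand in for the kernel's struct mutex, and the "did it change" pointer lives in the iterator here for simplicity, whereas the patch keeps it in a function-local static.

#include <pthread.h>
#include <stdio.h>

struct tracer {
	const char *name;
};

/* shared state, protected by the "global" lock */
static struct tracer nop_tracer = { .name = "nop" };
static struct tracer *current_tracer = &nop_tracer;
static pthread_mutex_t tracer_types_lock = PTHREAD_MUTEX_INITIALIZER;

/* per-open-file state, analogous to struct trace_iterator */
struct reader_iter {
	struct tracer trace;		/* private copy of the current tracer */
	struct tracer *old_tracer;	/* last tracer we copied from */
	pthread_mutex_t mutex;		/* serializes readers of this descriptor */
};

static void reader_iter_init(struct reader_iter *iter)
{
	pthread_mutex_lock(&tracer_types_lock);
	iter->trace = *current_tracer;	/* copy made under the global lock */
	iter->old_tracer = current_tracer;
	pthread_mutex_unlock(&tracer_types_lock);
	pthread_mutex_init(&iter->mutex, NULL);
}

static void reader_consume(struct reader_iter *iter)
{
	/* refresh the private copy only when the tracer changed */
	pthread_mutex_lock(&tracer_types_lock);
	if (iter->old_tracer != current_tracer) {
		iter->old_tracer = current_tracer;
		iter->trace = *current_tracer;
	}
	pthread_mutex_unlock(&tracer_types_lock);

	/* the global lock is NOT held here; only this reader is serialized */
	pthread_mutex_lock(&iter->mutex);
	printf("consuming events with tracer '%s'\n", iter->trace.name);
	pthread_mutex_unlock(&iter->mutex);
}

int main(void)
{
	struct reader_iter iter;

	reader_iter_init(&iter);
	reader_consume(&iter);
	pthread_mutex_destroy(&iter.mutex);
	return 0;
}

Built with something like "cc -pthread sketch.c", this also shows why s_stop() no longer drops trace_types_lock: the global lock is released as soon as the private copy is made, and only the per-reader mutex spans the actual consumption.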