@@ -313,7 +313,6 @@ static const char *trace_options[] = {
 	"bin",
 	"block",
 	"stacktrace",
-	"sched-tree",
 	"trace_printk",
 	"ftrace_preempt",
 	"branch",
@@ -1151,6 +1150,22 @@ void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 	__ftrace_trace_stack(tr->buffer, flags, skip, pc);
 }
 
+/**
+ * trace_dump_stack - record a stack back trace in the trace buffer
+ */
+void trace_dump_stack(void)
+{
+	unsigned long flags;
+
+	if (tracing_disabled || tracing_selftest_running)
+		return;
+
+	local_save_flags(flags);
+
+	/* skipping 3 traces, seems to get us at the caller of this function */
+	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
+}
+
 void
 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 {
@@ -2316,67 +2331,49 @@ static const struct file_operations tracing_cpumask_fops = {
 	.write = tracing_cpumask_write,
 };
 
-static ssize_t
-tracing_trace_options_read(struct file *filp, char __user *ubuf,
-			   size_t cnt, loff_t *ppos)
+static int tracing_trace_options_show(struct seq_file *m, void *v)
 {
 	struct tracer_opt *trace_opts;
 	u32 tracer_flags;
-	int len = 0;
-	char *buf;
-	int r = 0;
 	int i;
 
-
-	/* calculate max size */
-	for (i = 0; trace_options[i]; i++) {
-		len += strlen(trace_options[i]);
-		len += 3; /* "no" and newline */
-	}
-
 	mutex_lock(&trace_types_lock);
 	tracer_flags = current_trace->flags->val;
 	trace_opts = current_trace->flags->opts;
 
-	/*
-	 * Increase the size with names of options specific
-	 * of the current tracer.
-	 */
-	for (i = 0; trace_opts[i].name; i++) {
-		len += strlen(trace_opts[i].name);
-		len += 3; /* "no" and newline */
-	}
-
-	/* +1 for \0 */
-	buf = kmalloc(len + 1, GFP_KERNEL);
-	if (!buf) {
-		mutex_unlock(&trace_types_lock);
-		return -ENOMEM;
-	}
-
 	for (i = 0; trace_options[i]; i++) {
 		if (trace_flags & (1 << i))
-			r += sprintf(buf + r, "%s\n", trace_options[i]);
+			seq_printf(m, "%s\n", trace_options[i]);
 		else
-			r += sprintf(buf + r, "no%s\n", trace_options[i]);
+			seq_printf(m, "no%s\n", trace_options[i]);
 	}
 
 	for (i = 0; trace_opts[i].name; i++) {
 		if (tracer_flags & trace_opts[i].bit)
-			r += sprintf(buf + r, "%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "%s\n", trace_opts[i].name);
 		else
-			r += sprintf(buf + r, "no%s\n",
-				trace_opts[i].name);
+			seq_printf(m, "no%s\n", trace_opts[i].name);
 	}
 	mutex_unlock(&trace_types_lock);
 
-	WARN_ON(r >= len + 1);
+	return 0;
+}
 
-	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int __set_tracer_option(struct tracer *trace,
+			       struct tracer_flags *tracer_flags,
+			       struct tracer_opt *opts, int neg)
+{
+	int ret;
 
-	kfree(buf);
-	return r;
+	ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
+	if (ret)
+		return ret;
+
+	if (neg)
+		tracer_flags->val &= ~opts->bit;
+	else
+		tracer_flags->val |= opts->bit;
+	return 0;
 }
 
 /* Try to assign a tracer specific option */
@@ -2384,33 +2381,17 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 {
 	struct tracer_flags *tracer_flags = trace->flags;
 	struct tracer_opt *opts = NULL;
-	int ret = 0, i = 0;
-	int len;
+	int i;
 
 	for (i = 0; tracer_flags->opts[i].name; i++) {
 		opts = &tracer_flags->opts[i];
-		len = strlen(opts->name);
 
-		if (strncmp(cmp, opts->name, len) == 0) {
-			ret = trace->set_flag(tracer_flags->val,
-				opts->bit, !neg);
-			break;
-		}
+		if (strcmp(cmp, opts->name) == 0)
+			return __set_tracer_option(trace, trace->flags,
+						   opts, neg);
 	}
-	/* Not found */
-	if (!tracer_flags->opts[i].name)
-		return -EINVAL;
-
-	/* Refused to handle */
-	if (ret)
-		return ret;
-
-	if (neg)
-		tracer_flags->val &= ~opts->bit;
-	else
-		tracer_flags->val |= opts->bit;
 
-	return 0;
+	return -EINVAL;
 }
 
 static void set_tracer_flags(unsigned int mask, int enabled)
@@ -2430,7 +2411,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	char buf[64];
-	char *cmp = buf;
+	char *cmp;
 	int neg = 0;
 	int ret;
 	int i;
@@ -2442,16 +2423,15 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		return -EFAULT;
 
 	buf[cnt] = 0;
+	cmp = strstrip(buf);
 
-	if (strncmp(buf, "no", 2) == 0) {
+	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
 		cmp += 2;
 	}
 
 	for (i = 0; trace_options[i]; i++) {
-		int len = strlen(trace_options[i]);
-
-		if (strncmp(cmp, trace_options[i], len) == 0) {
+		if (strcmp(cmp, trace_options[i]) == 0) {
 			set_tracer_flags(1 << i, !neg);
 			break;
 		}
@@ -2471,9 +2451,18 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_trace_options_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_trace_options_show, NULL);
+}
+
 static const struct file_operations tracing_iter_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_trace_options_read,
+	.open = tracing_trace_options_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 	.write = tracing_trace_options_write,
 };
 
@@ -3392,21 +3381,18 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
-static ssize_t tracing_clock_read(struct file *filp, char __user *ubuf,
-				  size_t cnt, loff_t *ppos)
+static int tracing_clock_show(struct seq_file *m, void *v)
 {
-	char buf[64];
-	int bufiter = 0;
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
-		bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter,
+		seq_printf(m,
 			"%s%s%s%s", i ? " " : "",
 			i == trace_clock_id ? "[" : "", trace_clocks[i].name,
 			i == trace_clock_id ? "]" : "");
-	bufiter += snprintf(buf + bufiter, sizeof(buf) - bufiter, "\n");
+	seq_putc(m, '\n');
 
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, bufiter);
+	return 0;
 }
 
 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
@@ -3448,6 +3434,13 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int tracing_clock_open(struct inode *inode, struct file *file)
+{
+	if (tracing_disabled)
+		return -ENODEV;
+	return single_open(file, tracing_clock_show, NULL);
+}
+
 static const struct file_operations tracing_max_lat_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_max_lat_read,
@@ -3486,8 +3479,10 @@ static const struct file_operations tracing_mark_fops = {
 };
 
 static const struct file_operations trace_clock_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_clock_read,
+	.open = tracing_clock_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 	.write = tracing_clock_write,
 };
 
@@ -3948,39 +3943,16 @@ trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
-	ret = 0;
-	switch (val) {
-	case 0:
-		/* do nothing if already cleared */
-		if (!(topt->flags->val & topt->opt->bit))
-			break;
-
-		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 0);
-		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
-		topt->flags->val &= ~topt->opt->bit;
-		break;
-	case 1:
-		/* do nothing if already set */
-		if (topt->flags->val & topt->opt->bit)
-			break;
+	if (val != 0 && val != 1)
+		return -EINVAL;
 
+	if (!!(topt->flags->val & topt->opt->bit) != val) {
 		mutex_lock(&trace_types_lock);
-		if (current_trace->set_flag)
-			ret = current_trace->set_flag(topt->flags->val,
-						      topt->opt->bit, 1);
+		ret = __set_tracer_option(current_trace, topt->flags,
+					  topt->opt, val);
 		mutex_unlock(&trace_types_lock);
 		if (ret)
 			return ret;
-		topt->flags->val |= topt->opt->bit;
-		break;
-
-	default:
-		return -EINVAL;
 	}
 
 	*ppos += cnt;
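
The hunks above move the trace_options and trace_clock read paths onto the seq_file single_open() helpers and let the option parser tolerate trailing whitespace via strstrip(). As an illustration only (not part of the patch), here is a minimal userspace sketch that reads the two converted files; the tracefs location used is an assumption, commonly /sys/kernel/debug/tracing or /sys/kernel/tracing.

/* sketch: read the seq_file-backed tracing control files */
#include <stdio.h>

static void dump_file(const char *path)
{
	FILE *f = fopen(path, "r");
	char line[256];

	if (!f) {
		perror(path);
		return;
	}
	/* seq_read() returns newline-terminated records, so fgets() is enough */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	/* assumed mount point; adjust if tracefs lives elsewhere */
	dump_file("/sys/kernel/debug/tracing/trace_options");
	dump_file("/sys/kernel/debug/tracing/trace_clock");
	/*
	 * Writes still go through tracing_trace_options_write(); after the
	 * strstrip() change a trailing newline in e.g. "notrace_printk\n"
	 * is accepted.  Left out here to avoid changing tracer state.
	 */
	return 0;
}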