|
@@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
|
|
|
mutex_unlock(&event_mutex);
|
|
|
}
|
|
|
|
|
|
-/*
|
|
|
- * Open and update trace_array ref count.
|
|
|
- * Must have the current trace_array passed to it.
|
|
|
- */
|
|
|
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
|
|
|
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
|
|
|
{
|
|
|
- struct ftrace_event_file *file = inode->i_private;
|
|
|
- struct trace_array *tr = file->tr;
|
|
|
- int ret;
|
|
|
+ if (!dir)
|
|
|
+ return;
|
|
|
|
|
|
- if (trace_array_get(tr) < 0)
|
|
|
- return -ENODEV;
|
|
|
+ if (!--dir->nr_events) {
|
|
|
+ debugfs_remove_recursive(dir->entry);
|
|
|
+ list_del(&dir->list);
|
|
|
+ __put_system_dir(dir);
|
|
|
+ }
|
|
|
+}
|
|
|
|
|
|
- ret = tracing_open_generic(inode, filp);
|
|
|
- if (ret < 0)
|
|
|
- trace_array_put(tr);
|
|
|
- return ret;
|
|
|
+static void *event_file_data(struct file *filp)
|
|
|
+{
|
|
|
+ return ACCESS_ONCE(file_inode(filp)->i_private);
|
|
|
}
|
|
|
|
|
|
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
|
|
|
+static void remove_event_file_dir(struct ftrace_event_file *file)
|
|
|
{
|
|
|
- struct ftrace_event_file *file = inode->i_private;
|
|
|
- struct trace_array *tr = file->tr;
|
|
|
+ struct dentry *dir = file->dir;
|
|
|
+ struct dentry *child;
|
|
|
|
|
|
- trace_array_put(tr);
|
|
|
+ if (dir) {
|
|
|
+ spin_lock(&dir->d_lock); /* probably unneeded */
|
|
|
+ list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
|
|
|
+ if (child->d_inode) /* probably unneeded */
|
|
|
+ child->d_inode->i_private = NULL;
|
|
|
+ }
|
|
|
+ spin_unlock(&dir->d_lock);
|
|
|
|
|
|
- return 0;
|
|
|
+ debugfs_remove_recursive(dir);
|
|
|
+ }
|
|
|
+
|
|
|
+ list_del(&file->list);
|
|
|
+ remove_subsystem(file->system);
|
|
|
+ kmem_cache_free(file_cachep, file);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -679,15 +688,25 @@ static ssize_t
|
|
|
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
|
|
|
loff_t *ppos)
|
|
|
{
|
|
|
- struct ftrace_event_file *file = filp->private_data;
|
|
|
+ struct ftrace_event_file *file;
|
|
|
+ unsigned long flags;
|
|
|
char buf[4] = "0";
|
|
|
|
|
|
- if (file->flags & FTRACE_EVENT_FL_ENABLED &&
|
|
|
- !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
|
|
|
+ mutex_lock(&event_mutex);
|
|
|
+ file = event_file_data(filp);
|
|
|
+ if (likely(file))
|
|
|
+ flags = file->flags;
|
|
|
+ mutex_unlock(&event_mutex);
|
|
|
+
|
|
|
+ if (!file)
|
|
|
+ return -ENODEV;
|
|
|
+
|
|
|
+ if (flags & FTRACE_EVENT_FL_ENABLED &&
|
|
|
+ !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
|
|
|
strcpy(buf, "1");
|
|
|
|
|
|
- if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
|
|
|
- file->flags & FTRACE_EVENT_FL_SOFT_MODE)
|
|
|
+ if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
|
|
|
+ flags & FTRACE_EVENT_FL_SOFT_MODE)
|
|
|
strcat(buf, "*");
|
|
|
|
|
|
strcat(buf, "\n");
|
|
@@ -699,13 +718,10 @@ static ssize_t
|
|
|
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
|
loff_t *ppos)
|
|
|
{
|
|
|
- struct ftrace_event_file *file = filp->private_data;
|
|
|
+ struct ftrace_event_file *file;
|
|
|
unsigned long val;
|
|
|
int ret;
|
|
|
|
|
|
- if (!file)
|
|
|
- return -EINVAL;
|
|
|
-
|
|
|
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
|
|
|
if (ret)
|
|
|
return ret;
|
|
@@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
|
switch (val) {
|
|
|
case 0:
|
|
|
case 1:
|
|
|
+ ret = -ENODEV;
|
|
|
mutex_lock(&event_mutex);
|
|
|
- ret = ftrace_event_enable_disable(file, val);
|
|
|
+ file = event_file_data(filp);
|
|
|
+ if (likely(file))
|
|
|
+ ret = ftrace_event_enable_disable(file, val);
|
|
|
mutex_unlock(&event_mutex);
|
|
|
break;
|
|
|
|
|
@@ -825,7 +844,7 @@ enum {
|
|
|
|
|
|
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
|
|
|
{
|
|
|
- struct ftrace_event_call *call = m->private;
|
|
|
+ struct ftrace_event_call *call = event_file_data(m->private);
|
|
|
struct list_head *common_head = &ftrace_common_fields;
|
|
|
struct list_head *head = trace_get_fields(call);
|
|
|
struct list_head *node = v;
|
|
@@ -857,7 +876,7 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
|
|
|
|
|
|
static int f_show(struct seq_file *m, void *v)
|
|
|
{
|
|
|
- struct ftrace_event_call *call = m->private;
|
|
|
+ struct ftrace_event_call *call = event_file_data(m->private);
|
|
|
struct ftrace_event_field *field;
|
|
|
const char *array_descriptor;
|
|
|
|
|
@@ -910,6 +929,11 @@ static void *f_start(struct seq_file *m, loff_t *pos)
|
|
|
void *p = (void *)FORMAT_HEADER;
|
|
|
loff_t l = 0;
|
|
|
|
|
|
+ /* ->stop() is called even if ->start() fails */
|
|
|
+ mutex_lock(&event_mutex);
|
|
|
+ if (!event_file_data(m->private))
|
|
|
+ return ERR_PTR(-ENODEV);
|
|
|
+
|
|
|
while (l < *pos && p)
|
|
|
p = f_next(m, p, &l);
|
|
|
|
|
@@ -918,6 +942,7 @@ static void *f_start(struct seq_file *m, loff_t *pos)
|
|
|
|
|
|
static void f_stop(struct seq_file *m, void *p)
|
|
|
{
|
|
|
+ mutex_unlock(&event_mutex);
|
|
|
}
|
|
|
|
|
|
static const struct seq_operations trace_format_seq_ops = {
|
|
@@ -929,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = {
|
|
|
|
|
|
static int trace_format_open(struct inode *inode, struct file *file)
|
|
|
{
|
|
|
- struct ftrace_event_call *call = inode->i_private;
|
|
|
struct seq_file *m;
|
|
|
int ret;
|
|
|
|
|
@@ -938,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
|
|
|
return ret;
|
|
|
|
|
|
m = file->private_data;
|
|
|
- m->private = call;
|
|
|
+ m->private = file;
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
@@ -946,14 +970,18 @@ static int trace_format_open(struct inode *inode, struct file *file)
|
|
|
static ssize_t
|
|
|
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
|
|
|
{
|
|
|
- struct ftrace_event_call *call = filp->private_data;
|
|
|
+ int id = (long)event_file_data(filp);
|
|
|
char buf[32];
|
|
|
int len;
|
|
|
|
|
|
if (*ppos)
|
|
|
return 0;
|
|
|
|
|
|
- len = sprintf(buf, "%d\n", call->event.type);
|
|
|
+ if (unlikely(!id))
|
|
|
+ return -ENODEV;
|
|
|
+
|
|
|
+ len = sprintf(buf, "%d\n", id);
|
|
|
+
|
|
|
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
|
|
|
}
|
|
|
|
|
@@ -961,21 +989,28 @@ static ssize_t
|
|
|
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
|
|
|
loff_t *ppos)
|
|
|
{
|
|
|
- struct ftrace_event_call *call = filp->private_data;
|
|
|
+ struct ftrace_event_call *call;
|
|
|
struct trace_seq *s;
|
|
|
- int r;
|
|
|
+ int r = -ENODEV;
|
|
|
|
|
|
if (*ppos)
|
|
|
return 0;
|
|
|
|
|
|
s = kmalloc(sizeof(*s), GFP_KERNEL);
|
|
|
+
|
|
|
if (!s)
|
|
|
return -ENOMEM;
|
|
|
|
|
|
trace_seq_init(s);
|
|
|
|
|
|
- print_event_filter(call, s);
|
|
|
- r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
|
|
|
+ mutex_lock(&event_mutex);
|
|
|
+ call = event_file_data(filp);
|
|
|
+ if (call)
|
|
|
+ print_event_filter(call, s);
|
|
|
+ mutex_unlock(&event_mutex);
|
|
|
+
|
|
|
+ if (call)
|
|
|
+ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
|
|
|
|
|
|
kfree(s);
|
|
|
|
|
@@ -986,9 +1021,9 @@ static ssize_t
|
|
|
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
|
loff_t *ppos)
|
|
|
{
|
|
|
- struct ftrace_event_call *call = filp->private_data;
|
|
|
+ struct ftrace_event_call *call;
|
|
|
char *buf;
|
|
|
- int err;
|
|
|
+ int err = -ENODEV;
|
|
|
|
|
|
if (cnt >= PAGE_SIZE)
|
|
|
return -EINVAL;
|
|
@@ -1003,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
|
|
|
}
|
|
|
buf[cnt] = '\0';
|
|
|
|
|
|
- err = apply_event_filter(call, buf);
|
|
|
+ mutex_lock(&event_mutex);
|
|
|
+ call = event_file_data(filp);
|
|
|
+ if (call)
|
|
|
+ err = apply_event_filter(call, buf);
|
|
|
+ mutex_unlock(&event_mutex);
|
|
|
+
|
|
|
free_page((unsigned long) buf);
|
|
|
if (err < 0)
|
|
|
return err;
|
|
@@ -1225,10 +1265,9 @@ static const struct file_operations ftrace_set_event_fops = {
|
|
|
};
|
|
|
|
|
|
static const struct file_operations ftrace_enable_fops = {
|
|
|
- .open = tracing_open_generic_file,
|
|
|
+ .open = tracing_open_generic,
|
|
|
.read = event_enable_read,
|
|
|
.write = event_enable_write,
|
|
|
- .release = tracing_release_generic_file,
|
|
|
.llseek = default_llseek,
|
|
|
};
|
|
|
|
|
@@ -1240,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = {
|
|
|
};
|
|
|
|
|
|
static const struct file_operations ftrace_event_id_fops = {
|
|
|
- .open = tracing_open_generic,
|
|
|
.read = event_id_read,
|
|
|
.llseek = default_llseek,
|
|
|
};
|
|
@@ -1488,8 +1526,8 @@ event_create_dir(struct dentry *parent,
|
|
|
|
|
|
#ifdef CONFIG_PERF_EVENTS
|
|
|
if (call->event.type && call->class->reg)
|
|
|
- trace_create_file("id", 0444, file->dir, call,
|
|
|
- id);
|
|
|
+ trace_create_file("id", 0444, file->dir,
|
|
|
+ (void *)(long)call->event.type, id);
|
|
|
#endif
|
|
|
|
|
|
/*
|
|
@@ -1514,33 +1552,16 @@ event_create_dir(struct dentry *parent,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
|
|
|
-{
|
|
|
- if (!dir)
|
|
|
- return;
|
|
|
-
|
|
|
- if (!--dir->nr_events) {
|
|
|
- debugfs_remove_recursive(dir->entry);
|
|
|
- list_del(&dir->list);
|
|
|
- __put_system_dir(dir);
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
static void remove_event_from_tracers(struct ftrace_event_call *call)
|
|
|
{
|
|
|
struct ftrace_event_file *file;
|
|
|
struct trace_array *tr;
|
|
|
|
|
|
do_for_each_event_file_safe(tr, file) {
|
|
|
-
|
|
|
if (file->event_call != call)
|
|
|
continue;
|
|
|
|
|
|
- list_del(&file->list);
|
|
|
- debugfs_remove_recursive(file->dir);
|
|
|
- remove_subsystem(file->system);
|
|
|
- kmem_cache_free(file_cachep, file);
|
|
|
-
|
|
|
+ remove_event_file_dir(file);
|
|
|
/*
|
|
|
* The do_for_each_event_file_safe() is
|
|
|
* a double loop. After finding the call for this
|
|
@@ -1692,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
|
|
|
destroy_preds(call);
|
|
|
}
|
|
|
|
|
|
+static int probe_remove_event_call(struct ftrace_event_call *call)
|
|
|
+{
|
|
|
+ struct trace_array *tr;
|
|
|
+ struct ftrace_event_file *file;
|
|
|
+
|
|
|
+#ifdef CONFIG_PERF_EVENTS
|
|
|
+ if (call->perf_refcount)
|
|
|
+ return -EBUSY;
|
|
|
+#endif
|
|
|
+ do_for_each_event_file(tr, file) {
|
|
|
+ if (file->event_call != call)
|
|
|
+ continue;
|
|
|
+ /*
|
|
|
+		 * We can't rely on the ftrace_event_enable_disable(enable => 0)
|
|
|
+		 * that we are about to do; FTRACE_EVENT_FL_SOFT_MODE can suppress
|
|
|
+ * TRACE_REG_UNREGISTER.
|
|
|
+ */
|
|
|
+ if (file->flags & FTRACE_EVENT_FL_ENABLED)
|
|
|
+ return -EBUSY;
|
|
|
+ /*
|
|
|
+		 * The do_for_each_event_file() is
|
|
|
+ * a double loop. After finding the call for this
|
|
|
+ * trace_array, we use break to jump to the next
|
|
|
+ * trace_array.
|
|
|
+ */
|
|
|
+ break;
|
|
|
+ } while_for_each_event_file();
|
|
|
+
|
|
|
+ __trace_remove_event_call(call);
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
/* Remove an event_call */
|
|
|
-void trace_remove_event_call(struct ftrace_event_call *call)
|
|
|
+int trace_remove_event_call(struct ftrace_event_call *call)
|
|
|
{
|
|
|
+ int ret;
|
|
|
+
|
|
|
mutex_lock(&trace_types_lock);
|
|
|
mutex_lock(&event_mutex);
|
|
|
down_write(&trace_event_sem);
|
|
|
- __trace_remove_event_call(call);
|
|
|
+ ret = probe_remove_event_call(call);
|
|
|
up_write(&trace_event_sem);
|
|
|
mutex_unlock(&event_mutex);
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
+
|
|
|
+ return ret;
|
|
|
}
|
|
|
|
|
|
#define for_each_event(event, start, end) \
|
|
@@ -2270,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
|
|
|
{
|
|
|
struct ftrace_event_file *file, *next;
|
|
|
|
|
|
- list_for_each_entry_safe(file, next, &tr->events, list) {
|
|
|
- list_del(&file->list);
|
|
|
- debugfs_remove_recursive(file->dir);
|
|
|
- remove_subsystem(file->system);
|
|
|
- kmem_cache_free(file_cachep, file);
|
|
|
- }
|
|
|
+ list_for_each_entry_safe(file, next, &tr->events, list)
|
|
|
+ remove_event_file_dir(file);
|
|
|
}
|
|
|
|
|
|
static void
|