@@ -19,6 +19,7 @@
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
+#include <linux/irq_work.h>
 #include <linux/debugfs.h>
 #include <linux/pagemap.h>
 #include <linux/hardirq.h>
@@ -77,6 +78,21 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
 	return 0;
 }
 
+/*
+ * To prevent the comm cache from being overwritten when no
+ * tracing is active, only save the comm when a trace event
+ * occurred.
+ */
+static DEFINE_PER_CPU(bool, trace_cmdline_save);
+
+/*
+ * When a reader is waiting for data, then this variable is
+ * set to true.
+ */
+static bool trace_wakeup_needed;
+
+static struct irq_work trace_work_wakeup;
+
 /*
  * Kill all tracing for good (never come back).
  * It is initialized to 1 but will turn to zero if the initialization
@@ -139,6 +155,18 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+
+static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
+static char *trace_boot_options __initdata;
+
+static int __init set_trace_boot_options(char *str)
+{
+	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+	trace_boot_options = trace_boot_options_buf;
+	return 0;
+}
+__setup("trace_options=", set_trace_boot_options);
+
 unsigned long long ns2usecs(cycle_t nsec)
 {
 	nsec += 500;
@@ -198,20 +226,9 @@ static struct trace_array max_tr;
 
 static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
-/* tracer_enabled is used to toggle activation of a tracer */
-static int tracer_enabled = 1;
-
-/**
- * tracing_is_enabled - return tracer_enabled status
- *
- * This function is used by other tracers to know the status
- * of the tracer_enabled flag. Tracers may use this function
- * to know if it should enable their features when starting
- * up. See irqsoff tracer for an example (start_irqsoff_tracer).
- */
 int tracing_is_enabled(void)
 {
-	return tracer_enabled;
+	return tracing_is_on();
 }
 
 /*
@@ -333,12 +350,18 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
 static int trace_stop_count;
 static DEFINE_RAW_SPINLOCK(tracing_start_lock);
 
-static void wakeup_work_handler(struct work_struct *work)
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Schedules an irq_work to wake up any task that is blocked on the
+ * trace_wait queue. This is used with trace_poll for tasks polling the
+ * trace.
+ */
+static void trace_wake_up(struct irq_work *work)
 {
-	wake_up(&trace_wait);
-}
+	wake_up_all(&trace_wait);
 
-static DECLARE_DELAYED_WORK(wakeup_work, wakeup_work_handler);
+}
 
 /**
  * tracing_on - enable tracing buffers
@@ -393,22 +416,6 @@ int tracing_is_on(void)
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
-/**
- * trace_wake_up - wake up tasks waiting for trace input
- *
- * Schedules a delayed work to wake up any task that is blocked on the
- * trace_wait queue. These is used with trace_poll for tasks polling the
- * trace.
- */
-void trace_wake_up(void)
-{
-	const unsigned long delay = msecs_to_jiffies(2);
-
-	if (trace_flags & TRACE_ITER_BLOCK)
-		return;
-	schedule_delayed_work(&wakeup_work, delay);
-}
-
 static int __init set_buf_size(char *str)
 {
 	unsigned long buf_size;
@@ -431,7 +438,7 @@ static int __init set_tracing_thresh(char *str)
 
 	if (!str)
 		return 0;
-	ret = strict_strtoul(str, 0, &threshold);
+	ret = kstrtoul(str, 0, &threshold);
 	if (ret < 0)
 		return 0;
 	tracing_thresh = threshold * 1000;
@@ -757,6 +764,40 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
+static void default_wait_pipe(struct trace_iterator *iter)
+{
+	DEFINE_WAIT(wait);
+
+	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * The events can happen in critical sections where
+	 * checking a work queue can cause deadlocks.
+	 * After adding a task to the queue, this flag is set
+	 * only to notify events to try to wake up the queue
+	 * using irq_work.
+	 *
+	 * We don't clear it even if the buffer is no longer
+	 * empty. The flag only causes the next event to run
+	 * irq_work to do the work queue wake up. The worst
+	 * that can happen if we race with !trace_empty() is that
+	 * an event will cause an irq_work to try to wake up
+	 * an empty queue.
+	 *
+	 * There's no reason to protect this flag either, as
+	 * the work queue and irq_work logic will do the necessary
+	 * synchronization for the wake ups. The only thing
+	 * that is necessary is that the wake up happens after
+	 * a task has been queued. It's OK for spurious wake ups.
+	 */
+	trace_wakeup_needed = true;
+
+	if (trace_empty(iter))
+		schedule();
+
+	finish_wait(&trace_wait, &wait);
+}
+
 /**
  * register_tracer - register a tracer with the ftrace system.
  * @type - the plugin for the tracer
@@ -875,32 +916,6 @@ int register_tracer(struct tracer *type)
 	return ret;
 }
 
-void unregister_tracer(struct tracer *type)
-{
-	struct tracer **t;
-
-	mutex_lock(&trace_types_lock);
-	for (t = &trace_types; *t; t = &(*t)->next) {
-		if (*t == type)
-			goto found;
-	}
-	pr_info("Tracer %s not registered\n", type->name);
-	goto out;
-
- found:
-	*t = (*t)->next;
-
-	if (type == current_trace && tracer_enabled) {
-		tracer_enabled = 0;
-		tracing_stop();
-		if (current_trace->stop)
-			current_trace->stop(&global_trace);
-		current_trace = &nop_trace;
-	}
-out:
-	mutex_unlock(&trace_types_lock);
-}
-
 void tracing_reset(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer *buffer = tr->buffer;
@@ -1131,10 +1146,14 @@ void trace_find_cmdline(int pid, char comm[])
 
 void tracing_record_cmdline(struct task_struct *tsk)
 {
-	if (atomic_read(&trace_record_cmdline_disabled) || !tracer_enabled ||
-	    !tracing_is_on())
+	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
+		return;
+
+	if (!__this_cpu_read(trace_cmdline_save))
 		return;
 
+	__this_cpu_write(trace_cmdline_save, false);
+
 	trace_save_cmdline(tsk);
 }
 
@@ -1178,27 +1197,36 @@ trace_buffer_lock_reserve(struct ring_buffer *buffer,
 	return event;
 }
 
+void
+__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+{
+	__this_cpu_write(trace_cmdline_save, true);
+	if (trace_wakeup_needed) {
+		trace_wakeup_needed = false;
+		/* irq_work_queue() supplies its own memory barriers */
+		irq_work_queue(&trace_work_wakeup);
+	}
+	ring_buffer_unlock_commit(buffer, event);
+}
+
 static inline void
 __trace_buffer_unlock_commit(struct ring_buffer *buffer,
 			     struct ring_buffer_event *event,
-			     unsigned long flags, int pc,
-			     int wake)
+			     unsigned long flags, int pc)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack(buffer, flags, 6, pc);
 	ftrace_trace_userstack(buffer, flags, pc);
-
-	if (wake)
-		trace_wake_up();
 }
 
 void trace_buffer_unlock_commit(struct ring_buffer *buffer,
 				struct ring_buffer_event *event,
 				unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
 
 struct ring_buffer_event *
 trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
@@ -1215,29 +1243,21 @@ void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
 					struct ring_buffer_event *event,
 					unsigned long flags, int pc)
 {
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 1);
+	__trace_buffer_unlock_commit(buffer, event, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
 
-void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer,
-				       struct ring_buffer_event *event,
-				       unsigned long flags, int pc)
-{
-	__trace_buffer_unlock_commit(buffer, event, flags, pc, 0);
-}
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit);
-
-void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer,
-					    struct ring_buffer_event *event,
-					    unsigned long flags, int pc,
-					    struct pt_regs *regs)
+void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+				     struct ring_buffer_event *event,
+				     unsigned long flags, int pc,
+				     struct pt_regs *regs)
 {
-	ring_buffer_unlock_commit(buffer, event);
+	__buffer_unlock_commit(buffer, event);
 
 	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
-EXPORT_SYMBOL_GPL(trace_nowake_buffer_unlock_commit_regs);
+EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
 
 void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
 					 struct ring_buffer_event *event)
@@ -1269,7 +1289,7 @@ trace_function(struct trace_array *tr,
 	entry->parent_ip = parent_ip;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 }
 
 void
@@ -1362,7 +1382,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 	entry->size = trace.nr_entries;
 
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out:
 	/* Again, don't let gcc optimize things here */
@@ -1458,7 +1478,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 
 	save_stack_trace_user(&trace);
 	if (!filter_check_discard(call, entry, buffer, event))
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 
  out_drop_count:
 	__this_cpu_dec(user_stack_count);
@@ -1559,10 +1579,10 @@ static int alloc_percpu_trace_buffer(void)
 	return -ENOMEM;
 }
 
+static int buffers_allocated;
+
 void trace_printk_init_buffers(void)
 {
-	static int buffers_allocated;
-
 	if (buffers_allocated)
 		return;
 
|
|
|
|
|
|
pr_info("ftrace: Allocated trace_printk buffers\n");
|
|
|
|
|
|
+ /* Expand the buffers to set size */
|
|
|
+ tracing_update_buffers();
|
|
|
+
|
|
|
buffers_allocated = 1;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * trace_printk_init_buffers() can be called by modules.
|
|
|
+ * If that happens, then we need to start cmdline recording
|
|
|
+ * directly here. If the global_trace.buffer is already
|
|
|
+ * allocated here, then this was called by module code.
|
|
|
+ */
|
|
|
+ if (global_trace.buffer)
|
|
|
+ tracing_start_cmdline_record();
|
|
|
+}
|
|
|
+
|
|
|
+void trace_printk_start_comm(void)
|
|
|
+{
|
|
|
+ /* Start tracing comms if trace printk is set */
|
|
|
+ if (!buffers_allocated)
|
|
|
+ return;
|
|
|
+ tracing_start_cmdline_record();
|
|
|
+}
|
|
|
+
|
|
|
+static void trace_printk_start_stop_comm(int enabled)
|
|
|
+{
|
|
|
+ if (!buffers_allocated)
|
|
|
+ return;
|
|
|
+
|
|
|
+ if (enabled)
|
|
|
+ tracing_start_cmdline_record();
|
|
|
+ else
|
|
|
+ tracing_stop_cmdline_record();
|
|
|
}
|
|
|
|
|
|
/**
|
|
@@ -1622,7 +1673,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 
 	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
 
@@ -1693,7 +1744,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	memcpy(&entry->buf, tbuffer, len);
 	entry->buf[len] = '\0';
 	if (!filter_check_discard(call, entry, buffer, event)) {
-		ring_buffer_unlock_commit(buffer, event);
+		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(buffer, flags, 6, pc);
 	}
  out:
@@ -2794,26 +2845,19 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 
 	if (mask == TRACE_ITER_OVERWRITE)
 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
+
+	if (mask == TRACE_ITER_PRINTK)
+		trace_printk_start_stop_comm(enabled);
 }
 
-static ssize_t
-tracing_trace_options_write(struct file *filp, const char __user *ubuf,
-			size_t cnt, loff_t *ppos)
+static int trace_set_options(char *option)
 {
-	char buf[64];
 	char *cmp;
 	int neg = 0;
-	int ret;
+	int ret = 0;
 	int i;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-	cmp = strstrip(buf);
+	cmp = strstrip(option);
 
 	if (strncmp(cmp, "no", 2) == 0) {
 		neg = 1;
@@ -2832,10 +2876,25 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 		mutex_lock(&trace_types_lock);
 		ret = set_tracer_option(current_trace, cmp, neg);
 		mutex_unlock(&trace_types_lock);
-		if (ret)
-			return ret;
 	}
 
+	return ret;
+}
+
+static ssize_t
+tracing_trace_options_write(struct file *filp, const char __user *ubuf,
+			size_t cnt, loff_t *ppos)
+{
+	char buf[64];
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	trace_set_options(buf);
+
 	*ppos += cnt;
 
 	return cnt;
@@ -2939,56 +2998,6 @@ static const struct file_operations tracing_saved_cmdlines_fops = {
 	.llseek = generic_file_llseek,
 };
 
-static ssize_t
-tracing_ctrl_read(struct file *filp, char __user *ubuf,
-		  size_t cnt, loff_t *ppos)
-{
-	char buf[64];
-	int r;
-
-	r = sprintf(buf, "%u\n", tracer_enabled);
-	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
-}
-
-static ssize_t
-tracing_ctrl_write(struct file *filp, const char __user *ubuf,
-		   size_t cnt, loff_t *ppos)
-{
-	struct trace_array *tr = filp->private_data;
-	unsigned long val;
-	int ret;
-
-	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
-	if (ret)
-		return ret;
-
-	val = !!val;
-
-	mutex_lock(&trace_types_lock);
-	if (tracer_enabled ^ val) {
-
-		/* Only need to warn if this is used to change the state */
-		WARN_ONCE(1, "tracing_enabled is deprecated. Use tracing_on");
-
-		if (val) {
-			tracer_enabled = 1;
-			if (current_trace->start)
-				current_trace->start(tr);
-			tracing_start();
-		} else {
-			tracer_enabled = 0;
-			tracing_stop();
-			if (current_trace->stop)
-				current_trace->stop(tr);
-		}
-	}
-	mutex_unlock(&trace_types_lock);
-
-	*ppos += cnt;
-
-	return cnt;
-}
-
 static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
@@ -3030,6 +3039,10 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 	 */
 	ring_buffer_expanded = 1;
 
+	/* May be called before buffers are initialized */
+	if (!global_trace.buffer)
+		return 0;
+
 	ret = ring_buffer_resize(global_trace.buffer, size, cpu);
 	if (ret < 0)
 		return ret;
@@ -3385,19 +3398,6 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table)
 	}
 }
 
-
-void default_wait_pipe(struct trace_iterator *iter)
-{
-	DEFINE_WAIT(wait);
-
-	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);
-
-	if (trace_empty(iter))
-		schedule();
-
-	finish_wait(&trace_wait, &wait);
-}
-
 /*
  * This is a make-shift waitqueue.
  * A tracer might use this callback on some rare cases:
@@ -3438,7 +3438,7 @@ static int tracing_wait_pipe(struct file *filp)
 			return -EINTR;
 
 		/*
-		 * We block until we read something and tracing is disabled.
+		 * We block until we read something and tracing is enabled.
 		 * We still block if tracing is disabled, but we have never
 		 * read anything. This allows a user to cat this file, and
 		 * then enable tracing. But after we have read something,
@@ -3446,7 +3446,7 @@ static int tracing_wait_pipe(struct file *filp)
 		 *
 		 * iter->pos will be 0 if we haven't read anything.
 		 */
-		if (!tracer_enabled && iter->pos)
+		if (tracing_is_enabled() && iter->pos)
 			break;
 	}
 
|
|
|
} else
|
|
|
entry->buf[cnt] = '\0';
|
|
|
|
|
|
- ring_buffer_unlock_commit(buffer, event);
|
|
|
+ __buffer_unlock_commit(buffer, event);
|
|
|
|
|
|
written = cnt;
|
|
|
|
|
@@ -4016,6 +4016,14 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
|
|
|
if (max_tr.buffer)
|
|
|
ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
|
|
|
|
|
|
+ /*
|
|
|
+ * New clock may not be consistent with the previous clock.
|
|
|
+ * Reset the buffer so that it doesn't have incomparable timestamps.
|
|
|
+ */
|
|
|
+ tracing_reset_online_cpus(&global_trace);
|
|
|
+ if (max_tr.buffer)
|
|
|
+ tracing_reset_online_cpus(&max_tr);
|
|
|
+
|
|
|
mutex_unlock(&trace_types_lock);
|
|
|
|
|
|
*fpos += cnt;
|
|
@@ -4037,13 +4045,6 @@ static const struct file_operations tracing_max_lat_fops = {
 	.llseek = generic_file_llseek,
 };
 
-static const struct file_operations tracing_ctrl_fops = {
-	.open = tracing_open_generic,
-	.read = tracing_ctrl_read,
-	.write = tracing_ctrl_write,
-	.llseek = generic_file_llseek,
-};
-
 static const struct file_operations set_tracer_fops = {
 	.open = tracing_open_generic,
 	.read = tracing_set_trace_read,
@@ -4385,6 +4386,9 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 	usec_rem = do_div(t, USEC_PER_SEC);
 	trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
 
+	cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
+	trace_seq_printf(s, "dropped events: %ld\n", cnt);
+
 	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
 
 	kfree(s);
@@ -4815,9 +4819,6 @@ static __init int tracer_init_debugfs(void)
 
 	d_tracer = tracing_init_dentry();
 
-	trace_create_file("tracing_enabled", 0644, d_tracer,
-			&global_trace, &tracing_ctrl_fops);
-
 	trace_create_file("trace_options", 0644, d_tracer,
 			NULL, &tracing_iter_fops);
 
@@ -5089,6 +5090,7 @@ __init static int tracer_alloc_buffers(void)
 
 	/* Only allocate trace_printk buffers if a trace_printk exists */
 	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+		/* Must be called before global_trace.buffer is allocated */
 		trace_printk_init_buffers();
 
 	/* To save memory, keep the ring buffer size to its minimum */
@@ -5136,6 +5138,7 @@ __init static int tracer_alloc_buffers(void)
 #endif
 
 	trace_init_cmdlines();
+	init_irq_work(&trace_work_wakeup, trace_wake_up);
 
 	register_tracer(&nop_trace);
 	current_trace = &nop_trace;
@@ -5147,6 +5150,13 @@ __init static int tracer_alloc_buffers(void)
 
 	register_die_notifier(&trace_die_notifier);
 
+	while (trace_boot_options) {
+		char *option;
+
+		option = strsep(&trace_boot_options, ",");
+		trace_set_options(option);
+	}
+
 	return 0;
 
 out_free_cpumask: