@@ -4,6 +4,7 @@
  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  */
 #include <linux/ring_buffer.h>
+#include <linux/ftrace_irq.h>
 #include <linux/spinlock.h>
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
@@ -18,6 +19,35 @@
 
 #include "trace.h"
 
+/*
+ * Since the write to the buffer is still not fully lockless,
+ * we must be careful with NMIs. The locks in the writers
+ * are taken when a write crosses to a new page. The locks
+ * protect against races with the readers (this will soon
+ * be fixed with a lockless solution).
+ *
+ * Because we can not protect against NMIs, and we want to
+ * keep traces reentrant, we need to manage what happens
+ * when we are in an NMI.
+ */
+static DEFINE_PER_CPU(int, rb_in_nmi);
+
+void ftrace_nmi_enter(void)
+{
+        __get_cpu_var(rb_in_nmi)++;
+        /* call arch specific handler too */
+        arch_ftrace_nmi_enter();
+}
+
+void ftrace_nmi_exit(void)
+{
+        arch_ftrace_nmi_exit();
+        __get_cpu_var(rb_in_nmi)--;
+        /* NMIs are not recursive */
+        WARN_ON_ONCE(__get_cpu_var(rb_in_nmi));
+}
+
+
 /*
  * A fast way to enable or disable all ring buffers is to
  * call tracing_on or tracing_off. Turning off the ring buffers
@@ -982,6 +1012,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
         struct ring_buffer *buffer = cpu_buffer->buffer;
         struct ring_buffer_event *event;
         unsigned long flags;
+        bool lock_taken = false;
 
         commit_page = cpu_buffer->commit_page;
         /* we just need to protect against interrupts */
@@ -995,7 +1026,19 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
                 struct buffer_page *next_page = tail_page;
 
                 local_irq_save(flags);
-                __raw_spin_lock(&cpu_buffer->lock);
+                /*
+                 * NMIs can happen after we take the lock.
+                 * If we are in an NMI, only take the lock
+                 * if it is not already taken. Otherwise
+                 * simply fail.
+                 */
+                if (unlikely(__get_cpu_var(rb_in_nmi))) {
+                        if (!__raw_spin_trylock(&cpu_buffer->lock))
+                                goto out_unlock;
+                } else
+                        __raw_spin_lock(&cpu_buffer->lock);
+
+                lock_taken = true;
 
                 rb_inc_page(cpu_buffer, &next_page);
 
@@ -1097,7 +1140,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
         if (tail <= BUF_PAGE_SIZE)
                 local_set(&tail_page->write, tail);
 
-        __raw_spin_unlock(&cpu_buffer->lock);
+        if (likely(lock_taken))
+                __raw_spin_unlock(&cpu_buffer->lock);
         local_irq_restore(flags);
         return NULL;
 }
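
Note: below is a minimal, self-contained C sketch of the locking idea in this patch, for illustration only. It is not kernel code: the per-CPU rb_in_nmi counter is reduced to a single variable, the raw spinlock is stood in for by a C11 atomic_flag, and all names here (nmi_enter, reserve_slot, buf_lock, the spin_* helpers) are made up for the example. The point it shows is the same as the change above: a writer that may run in NMI context must only trylock, and drop the event rather than spin on a lock the interrupted code on the same CPU may already hold.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int rb_in_nmi;                           /* per-CPU in the kernel; one variable here */
static atomic_flag buf_lock = ATOMIC_FLAG_INIT; /* stands in for cpu_buffer->lock */

static bool spin_trylock(atomic_flag *lock)
{
        return !atomic_flag_test_and_set_explicit(lock, memory_order_acquire);
}

static void spin_lock(atomic_flag *lock)
{
        while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
                ;       /* spin */
}

static void spin_unlock(atomic_flag *lock)
{
        atomic_flag_clear_explicit(lock, memory_order_release);
}

/* stand-ins for ftrace_nmi_enter()/ftrace_nmi_exit() */
static void nmi_enter(void) { rb_in_nmi++; }
static void nmi_exit(void)  { rb_in_nmi--; }

/* Writer path: returns false when the event must be dropped. */
static bool reserve_slot(void)
{
        bool lock_taken = false;
        bool ok = false;

        if (rb_in_nmi) {
                /* In an NMI: never spin, the interrupted code may hold the lock. */
                if (!spin_trylock(&buf_lock))
                        goto out;
        } else {
                spin_lock(&buf_lock);
        }
        lock_taken = true;

        /* ... page-crossing work protected by the lock would go here ... */
        ok = true;
out:
        if (lock_taken)
                spin_unlock(&buf_lock);
        return ok;
}

int main(void)
{
        printf("normal context: %s\n", reserve_slot() ? "reserved" : "dropped");

        nmi_enter();
        /* Simulate the bad case: the lock is already held underneath the NMI. */
        spin_lock(&buf_lock);
        printf("nmi, lock held: %s\n", reserve_slot() ? "reserved" : "dropped");
        spin_unlock(&buf_lock);
        nmi_exit();

        return 0;
}

Built with any C11 compiler (e.g. cc -std=c11), the second printf reports "dropped" because the simulated NMI finds the lock held and falls back to trylock instead of deadlocking, which is exactly the trade the patch makes in __rb_reserve_next.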