@@ -620,12 +620,6 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 	kfree(cpu_buffer);
 }
 
-/*
- * Causes compile errors if the struct buffer_page gets bigger
- * than the struct page.
- */
-extern int ring_buffer_page_too_big(void);
-
 #ifdef CONFIG_HOTPLUG_CPU
 static int rb_cpu_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu);
@@ -648,11 +642,6 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	int bsize;
 	int cpu;
 
-	/* Paranoid! Optimizes out when all is well */
-	if (sizeof(struct buffer_page) > sizeof(struct page))
-		ring_buffer_page_too_big();
-
-
 	/* keep it in its own cache line */
 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
 			 GFP_KERNEL);
@@ -668,8 +657,8 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 	buffer->reader_lock_key = key;
 
 	/* need at least two pages */
-	if (buffer->pages == 1)
-		buffer->pages++;
+	if (buffer->pages < 2)
+		buffer->pages = 2;
 
 	/*
 	 * In case of non-hotplug cpu, if the ring-buffer is allocated
@@ -1013,7 +1002,7 @@ rb_event_index(struct ring_buffer_event *event)
 {
 	unsigned long addr = (unsigned long)event;
 
-	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
+	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
 }
 
 static inline int
@@ -1334,9 +1323,6 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 
 	/* We reserved something on the buffer */
 
-	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
-		return NULL;
-
 	event = __rb_page_index(tail_page, tail);
 	rb_update_event(event, type, length);
 
@@ -2480,6 +2466,21 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
 
+static inline int rb_ok_to_lock(void)
+{
+	/*
+	 * If an NMI die dumps out the content of the ring buffer
+	 * do not grab locks. We also permanently disable the ring
+	 * buffer too. A one time deal is all you get from reading
+	 * the ring buffer from an NMI.
+	 */
+	if (likely(!in_nmi() && !oops_in_progress))
+		return 1;
+
+	tracing_off_permanent();
+	return 0;
+}
+
 /**
  * ring_buffer_peek - peek at the next event to be read
  * @buffer: The ring buffer to read
@@ -2495,14 +2496,20 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
 	unsigned long flags;
+	int dolock;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return NULL;
 
+	dolock = rb_ok_to_lock();
  again:
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	event = rb_buffer_peek(buffer, cpu, ts);
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	if (event && event->type_len == RINGBUF_TYPE_PADDING) {
 		cpu_relax();
@@ -2554,6 +2561,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
 	unsigned long flags;
+	int dolock;
+
+	dolock = rb_ok_to_lock();
 
  again:
 	/* might be called in atomic */
@@ -2563,7 +2573,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 		goto out;
 
 	cpu_buffer = buffer->buffers[cpu];
-	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 
 	event = rb_buffer_peek(buffer, cpu, ts);
 	if (!event)
@@ -2572,7 +2584,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	rb_advance_reader(cpu_buffer);
 
  out_unlock:
-	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
  out:
 	preempt_enable();
@@ -2770,12 +2784,25 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 int ring_buffer_empty(struct ring_buffer *buffer)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int cpu;
+	int ret;
+
+	dolock = rb_ok_to_lock();
 
 	/* yes this is racy, but if you don't like the race, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		if (!rb_per_cpu_empty(cpu_buffer))
+		local_irq_save(flags);
+		if (dolock)
+			spin_lock(&cpu_buffer->reader_lock);
+		ret = rb_per_cpu_empty(cpu_buffer);
+		if (dolock)
+			spin_unlock(&cpu_buffer->reader_lock);
+		local_irq_restore(flags);
+
+		if (!ret)
 			return 0;
 	}
 
@@ -2791,14 +2818,23 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long flags;
+	int dolock;
 	int ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 1;
 
+	dolock = rb_ok_to_lock();
+
 	cpu_buffer = buffer->buffers[cpu];
+	local_irq_save(flags);
+	if (dolock)
+		spin_lock(&cpu_buffer->reader_lock);
 	ret = rb_per_cpu_empty(cpu_buffer);
-
+	if (dolock)
+		spin_unlock(&cpu_buffer->reader_lock);
+	local_irq_restore(flags);
 
 	return ret;
 }
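
For reference, the reader-side pattern these hunks converge on can be condensed as in the sketch below. This is illustrative only and not part of the patch: the wrapper name peek_pattern() is made up, and it assumes the kernel-internal helpers and fields used by the hunks above (rb_ok_to_lock(), rb_buffer_peek(), the per-CPU reader_lock).

/*
 * Illustrative sketch (not part of the patch): rb_ok_to_lock() gates the
 * reader_lock.  Interrupts are always disabled around the read, but the
 * spinlock is skipped when called from NMI or oops context, where blocking
 * on the lock could deadlock the dump; in that case the ring buffer is
 * permanently disabled by rb_ok_to_lock() itself.
 */
static struct ring_buffer_event *
peek_pattern(struct ring_buffer *buffer, int cpu, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock = rb_ok_to_lock();	/* 0 in NMI/oops context */

	local_irq_save(flags);
	if (dolock)
		spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(buffer, cpu, ts);
	if (dolock)
		spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return event;
}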