|
@@ -488,12 +488,14 @@ struct ring_buffer_per_cpu {
|
|
|
struct buffer_page *reader_page;
|
|
|
unsigned long lost_events;
|
|
|
unsigned long last_overrun;
|
|
|
+ local_t entries_bytes;
|
|
|
local_t commit_overrun;
|
|
|
local_t overrun;
|
|
|
local_t entries;
|
|
|
local_t committing;
|
|
|
local_t commits;
|
|
|
unsigned long read;
|
|
|
+ unsigned long read_bytes;
|
|
|
u64 write_stamp;
|
|
|
u64 read_stamp;
|
|
|
};
|
|
@@ -1708,6 +1710,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
|
|
|
* the counters.
|
|
|
*/
|
|
|
local_add(entries, &cpu_buffer->overrun);
|
|
|
+ local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
|
|
|
|
|
|
/*
|
|
|
* The entries will be zeroed out when we move the
|
|
@@ -1863,6 +1866,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
|
|
|
event = __rb_page_index(tail_page, tail);
|
|
|
kmemcheck_annotate_bitfield(event, bitfield);
|
|
|
|
|
|
+ /* account for padding: the bytes from tail to end of page hold no events */
+
+
+
+ local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
|
|
|
+
|
|
|
/*
|
|
|
* Save the original length to the meta data.
|
|
|
* This will be used by the reader to add lost event
|
|
@@ -2054,6 +2060,9 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
|
|
|
if (!tail)
|
|
|
tail_page->page->time_stamp = ts;
|
|
|
|
|
|
+ /* account for these added bytes */
|
|
|
+ local_add(length, &cpu_buffer->entries_bytes);
|
|
|
+
|
|
|
return event;
|
|
|
}
|
|
|
|
|
@@ -2076,6 +2085,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
|
|
|
if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
|
|
|
unsigned long write_mask =
|
|
|
local_read(&bpage->write) & ~RB_WRITE_MASK;
|
|
|
+ unsigned long event_length = rb_event_length(event);
|
|
|
/*
|
|
|
* This is on the tail page. It is possible that
|
|
|
* a write could come in and move the tail page
|
|
@@ -2085,8 +2095,11 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
|
|
|
old_index += write_mask;
|
|
|
new_index += write_mask;
|
|
|
index = local_cmpxchg(&bpage->write, old_index, new_index);
|
|
|
- if (index == old_index)
|
|
|
+ if (index == old_index) {
|
|
|
+ /* event discarded before commit: give its bytes back */
+
+
+
+ local_sub(event_length, &cpu_buffer->entries_bytes);
|
|
|
return 1;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
/* could not discard */
|
|
@@ -2660,6 +2673,63 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
 }
 
+/**
+ * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ *
+ * Returns 0 if @cpu is not in the buffer's cpumask or if no page with
+ * events is available.  NOTE(review): page time stamps are u64; the
+ * unsigned long return truncates on 32-bit - confirm callers are ok
+ * with low-order bits only.
+ */
+unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+{
+	unsigned long flags;
+	struct ring_buffer_per_cpu *cpu_buffer;
+	struct buffer_page *bpage;
+	unsigned long ret = 0;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+	/*
+	 * if the tail is on reader_page, oldest time stamp is on the reader
+	 * page
+	 */
+	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
+		bpage = cpu_buffer->reader_page;
+	else
+		bpage = rb_set_head_page(cpu_buffer);
+	/* rb_set_head_page() returns NULL when no head page is found */
+	if (bpage)
+		ret = bpage->page->time_stamp;
+	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
+
+/**
+ * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * @buffer: The ring buffer
+ * @cpu: The per CPU buffer to read from.
+ */
+unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
+{
+	struct ring_buffer_per_cpu *cpu_buffer;
+	unsigned long ret;
+
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return 0;
+
+	cpu_buffer = buffer->buffers[cpu];
+	/* bytes ever written minus bytes already consumed by readers */
+	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
+
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  * @buffer: The ring buffer
|
|
@@ -3527,11 +3592,13 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
|
|
|
cpu_buffer->reader_page->read = 0;
|
|
|
|
|
|
local_set(&cpu_buffer->commit_overrun, 0);
|
|
|
+ local_set(&cpu_buffer->entries_bytes, 0);
|
|
|
local_set(&cpu_buffer->overrun, 0);
|
|
|
local_set(&cpu_buffer->entries, 0);
|
|
|
local_set(&cpu_buffer->committing, 0);
|
|
|
local_set(&cpu_buffer->commits, 0);
|
|
|
cpu_buffer->read = 0;
|
|
|
+ cpu_buffer->read_bytes = 0;
|
|
|
|
|
|
cpu_buffer->write_stamp = 0;
|
|
|
cpu_buffer->read_stamp = 0;
|
|
@@ -3918,6 +3985,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
|
|
|
} else {
|
|
|
/* update the entry counter */
|
|
|
cpu_buffer->read += rb_page_entries(reader);
|
|
|
+ cpu_buffer->read_bytes += BUF_PAGE_SIZE;
|
|
|
|
|
|
/* swap the pages */
|
|
|
rb_init_page(bpage);
|