|
@@ -460,9 +460,10 @@ struct ring_buffer_per_cpu {
|
|
|
unsigned long lost_events;
|
|
|
unsigned long last_overrun;
|
|
|
local_t entries_bytes;
|
|
|
- local_t commit_overrun;
|
|
|
- local_t overrun;
|
|
|
local_t entries;
|
|
|
+ local_t overrun;
|
|
|
+ local_t commit_overrun;
|
|
|
+ local_t dropped_events;
|
|
|
local_t committing;
|
|
|
local_t commits;
|
|
|
unsigned long read;
|
|
@@ -2155,8 +2156,10 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
|
|
|
* If we are not in overwrite mode,
|
|
|
* this is easy, just stop here.
|
|
|
*/
|
|
|
- if (!(buffer->flags & RB_FL_OVERWRITE))
|
|
|
+ if (!(buffer->flags & RB_FL_OVERWRITE)) {
|
|
|
+ local_inc(&cpu_buffer->dropped_events);
|
|
|
goto out_reset;
|
|
|
+ }
|
|
|
|
|
|
ret = rb_handle_head_page(cpu_buffer,
|
|
|
tail_page,
|
|
@@ -2995,7 +2998,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
|
|
|
EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
|
|
|
|
|
|
/**
|
|
|
- * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
|
|
|
+ * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
|
|
|
+ * buffer wrapping around (only if RB_FL_OVERWRITE is on).
|
|
|
* @buffer: The ring buffer
|
|
|
* @cpu: The per CPU buffer to get the number of overruns from
|
|
|
*/
|
|
@@ -3015,7 +3019,9 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
|
|
|
EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
|
|
|
|
|
|
/**
|
|
|
- * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
|
|
|
+ * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
|
|
|
+ * commits failing due to the buffer wrapping around while there are uncommitted
|
|
|
+ * events, such as during an interrupt storm.
|
|
|
* @buffer: The ring buffer
|
|
|
* @cpu: The per CPU buffer to get the number of overruns from
|
|
|
*/
|
|
@@ -3035,6 +3041,28 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
|
|
|
|
|
|
+/**
|
|
|
+ * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
|
|
|
+ * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
|
|
|
+ * @buffer: The ring buffer
|
|
|
+ * @cpu: The per CPU buffer to get the number of dropped events from
|
|
|
+ */
|
|
|
+unsigned long
|
|
|
+ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
|
|
|
+{
|
|
|
+ struct ring_buffer_per_cpu *cpu_buffer;
|
|
|
+ unsigned long ret;
|
|
|
+
|
|
|
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ cpu_buffer = buffer->buffers[cpu];
|
|
|
+ ret = local_read(&cpu_buffer->dropped_events);
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
|
|
|
+
|
|
|
/**
|
|
|
* ring_buffer_entries - get the number of entries in a buffer
|
|
|
* @buffer: The ring buffer
|
|
@@ -3864,9 +3892,10 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
|
|
|
local_set(&cpu_buffer->reader_page->page->commit, 0);
|
|
|
cpu_buffer->reader_page->read = 0;
|
|
|
|
|
|
- local_set(&cpu_buffer->commit_overrun, 0);
|
|
|
local_set(&cpu_buffer->entries_bytes, 0);
|
|
|
local_set(&cpu_buffer->overrun, 0);
|
|
|
+ local_set(&cpu_buffer->commit_overrun, 0);
|
|
|
+ local_set(&cpu_buffer->dropped_events, 0);
|
|
|
local_set(&cpu_buffer->entries, 0);
|
|
|
local_set(&cpu_buffer->committing, 0);
|
|
|
local_set(&cpu_buffer->commits, 0);
|