@@ -2606,6 +2606,19 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 
+/*
+ * The total entries in the ring buffer is the running counter
+ * of entries entered into the ring buffer, minus the sum of
+ * the entries read from the ring buffer and the number of
+ * entries that were overwritten.
+ */
+static inline unsigned long
+rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	return local_read(&cpu_buffer->entries) -
+		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
+}
+
 /**
  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
  * @buffer: The ring buffer
@@ -2614,16 +2627,13 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
-	unsigned long ret;
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-	ret = (local_read(&cpu_buffer->entries) - local_read(&cpu_buffer->overrun))
-		- cpu_buffer->read;
 
-	return ret;
+	return rb_num_of_entries(cpu_buffer);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 
@@ -2684,8 +2694,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		entries += (local_read(&cpu_buffer->entries) -
-			    local_read(&cpu_buffer->overrun)) - cpu_buffer->read;
+		entries += rb_num_of_entries(cpu_buffer);
 	}
 
 	return entries;
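
As an illustrative sketch (not part of the patch): rb_num_of_entries() folds the two counter expressions that ring_buffer_entries_cpu() and ring_buffer_entries() previously duplicated into one place. The standalone program below shows only the accounting itself; the struct name and the values are hypothetical stand-ins for the local_t fields of struct ring_buffer_per_cpu.

#include <stdio.h>

/* Hypothetical snapshot of the per-cpu counters referenced above. */
struct rb_counters_sketch {
	unsigned long entries;	/* events ever written to this cpu buffer */
	unsigned long overrun;	/* events lost when the writer overwrote  */
	unsigned long read;	/* events already consumed by a reader    */
};

int main(void)
{
	struct rb_counters_sketch c = { .entries = 100, .overrun = 30, .read = 20 };

	/* Same arithmetic as rb_num_of_entries(): written - (lost + read). */
	printf("entries remaining: %lu\n", c.entries - (c.overrun + c.read));

	return 0;	/* prints "entries remaining: 50" */
}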