@@ -18,12 +18,24 @@
 static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
			      unsigned long offset, unsigned long head)
 {
-	unsigned long mask;
+	unsigned long sz = perf_data_size(rb);
+	unsigned long mask = sz - 1;
 
-	if (!rb->writable)
+	/*
+	 * check if user-writable
+	 * overwrite : over-write its own tail
+	 * !overwrite: buffer possibly drops events.
+	 */
+	if (rb->overwrite)
 		return true;
 
-	mask = perf_data_size(rb) - 1;
+	/*
+	 * verify that payload is not bigger than buffer
+	 * otherwise masking logic may fail to detect
+	 * the "not enough space" condition
+	 */
+	if ((head - offset) > sz)
+		return false;
 
 	offset = (offset - tail) & mask;
 	head = (head - tail) & mask;
@@ -212,7 +224,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 	rb->watermark = max_size / 2;
 
 	if (flags & RING_BUFFER_WRITABLE)
-		rb->writable = 1;
+		rb->overwrite = 0;
+	else
+		rb->overwrite = 1;
 
 	atomic_set(&rb->refcount, 1);
 