@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>

 #include "trace.h"
 #include "trace_output.h"
@@ -20,6 +21,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;

 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -122,11 +124,11 @@ static void mmio_close(struct trace_iterator *iter)

 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = 0;
+	unsigned long cnt = atomic_xchg(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->tr->buffer);

 	if (over > prev_overruns)
-		cnt = over - prev_overruns;
+		cnt += over - prev_overruns;
 	prev_overruns = over;
 	return cnt;
 }
@@ -308,8 +310,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,

 	event	= ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					   &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry	= ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type			= TRACE_MMIO_RW;
@@ -336,8 +340,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,

 	event	= ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					   &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry	= ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type			= TRACE_MMIO_MAP;
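
The change follows a simple lock-free accounting pattern: any writer that fails to reserve ring-buffer space bumps an atomic counter, and the reader later drains that counter with an atomic exchange, so increments landing between the read and the reset are never lost. Below is a minimal user-space sketch of the same pattern, assuming C11 atomics in place of the kernel's atomic_t/atomic_inc()/atomic_xchg() API; record_drop() and take_drops() are illustrative names, not part of the patch.

/* Sketch only: C11 stand-in for the kernel's dropped_count accounting. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong dropped_count;	/* zero-initialized, like the kernel's atomic_t */

/* Writer side: reservation failed, so account for the lost event. */
static void record_drop(void)
{
	atomic_fetch_add(&dropped_count, 1);
}

/*
 * Reader side: fetch and reset in a single atomic step, as atomic_xchg()
 * does in count_overruns(), so drops that race with the report are
 * counted in the next report instead of vanishing.
 */
static unsigned long take_drops(void)
{
	return atomic_exchange(&dropped_count, 0);
}

int main(void)
{
	record_drop();
	record_drop();
	printf("dropped: %lu\n", take_drops());	/* prints 2 */
	printf("dropped: %lu\n", take_drops());	/* prints 0: counter was drained */
	return 0;
}

The exchange is what lets count_overruns() fold the drop tally into its return value while __trace_mmiotrace_rw()/__trace_mmiotrace_map() keep incrementing concurrently.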