|
@@ -398,6 +398,25 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+static u64 check_and_compute_delta(u64 prev, u64 val)
|
|
|
+{
|
|
|
+	u64 delta = (val - prev) & 0xfffffffful;
|
|
|
+
|
|
|
+	/*
|
|
|
+	 * POWER7 can roll back counter values, if the new value is smaller
|
|
|
+	 * than the previous value it will cause the delta and the counter to
|
|
|
+	 * have bogus values unless we rolled a counter over. If a counter is
|
|
|
+	 * rolled back, it will be smaller, but within 256, which is the maximum
|
|
|
+	 * number of events to roll back at once. If we detect a rollback
|
|
|
+	 * return 0. This can lead to a small lack of precision in the
|
|
|
+	 * counters.
|
|
|
+	 */
|
|
|
+	if (prev > val && (prev - val) < 256)
|
|
|
+		delta = 0;
|
|
|
+
|
|
|
+	return delta;
|
|
|
+}
|
|
|
+
|
|
|
static void power_pmu_read(struct perf_event *event)
|
|
|
{
|
|
|
s64 val, delta, prev;
|
|
@@ -416,10 +435,11 @@ static void power_pmu_read(struct perf_event *event)
|
|
|
prev = local64_read(&event->hw.prev_count);
|
|
|
barrier();
|
|
|
val = read_pmc(event->hw.idx);
|
|
|
+ delta = check_and_compute_delta(prev, val);
|
|
|
+ if (!delta)
|
|
|
+ return;
|
|
|
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
|
|
|
|
|
|
- /* The counters are only 32 bits wide */
|
|
|
- delta = (val - prev) & 0xfffffffful;
|
|
|
local64_add(delta, &event->count);
|
|
|
local64_sub(delta, &event->hw.period_left);
|
|
|
}
|
|
@@ -449,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
|
|
|
val = (event->hw.idx == 5) ? pmc5 : pmc6;
|
|
|
prev = local64_read(&event->hw.prev_count);
|
|
|
event->hw.idx = 0;
|
|
|
- delta = (val - prev) & 0xfffffffful;
|
|
|
- local64_add(delta, &event->count);
|
|
|
+ delta = check_and_compute_delta(prev, val);
|
|
|
+ if (delta)
|
|
|
+ local64_add(delta, &event->count);
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -458,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
|
|
|
unsigned long pmc5, unsigned long pmc6)
|
|
|
{
|
|
|
struct perf_event *event;
|
|
|
- u64 val;
|
|
|
+ u64 val, prev;
|
|
|
int i;
|
|
|
|
|
|
for (i = 0; i < cpuhw->n_limited; ++i) {
|
|
|
event = cpuhw->limited_counter[i];
|
|
|
event->hw.idx = cpuhw->limited_hwidx[i];
|
|
|
val = (event->hw.idx == 5) ? pmc5 : pmc6;
|
|
|
- local64_set(&event->hw.prev_count, val);
|
|
|
+ prev = local64_read(&event->hw.prev_count);
|
|
|
+ if (check_and_compute_delta(prev, val))
|
|
|
+ local64_set(&event->hw.prev_count, val);
|
|
|
perf_event_update_userpage(event);
|
|
|
}
|
|
|
}
|
|
@@ -1197,7 +1220,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
|
|
|
|
|
|
/* we don't have to worry about interrupts here */
|
|
|
prev = local64_read(&event->hw.prev_count);
|
|
|
- delta = (val - prev) & 0xfffffffful;
|
|
|
+ delta = check_and_compute_delta(prev, val);
|
|
|
local64_add(delta, &event->count);
|
|
|
|
|
|
/*
|