@@ -1282,6 +1282,11 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
+static inline bool event_is_checkpointed(struct perf_event *event)
+{
+	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
+}
+
 /*
  * Save and restart an expired event. Called by NMI contexts,
  * so it has to be careful about preempting normal event ops:
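Note: HSW_IN_TX_CHECKPOINTED is bit 33 of the event config and corresponds to the IN_TXCP bit of IA32_PERFEVTSELx, which makes the counter roll back to its checkpointed value when a TSX transaction aborts. As a rough illustration (a userspace sketch, not part of the patch; it assumes raw config bits 32/33 are passed through to the PMU as this series does), such an event could be requested like this:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* 0x3c = UNHALTED_CORE_CYCLES; bits 32/33 = IN_TX / IN_TXCP,
     * matching HSW_IN_TX / HSW_IN_TX_CHECKPOINTED in the kernel. */
    static int open_checkpointed_cycles(pid_t pid, int cpu)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            attr.config = 0x3cULL | (1ULL << 32) | (1ULL << 33);

            return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
    }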
@@ -1289,6 +1294,17 @@ static void intel_pmu_enable_event(struct perf_event *event)
 int intel_pmu_save_and_restart(struct perf_event *event)
 {
 	x86_perf_event_update(event);
+	/*
+	 * For a checkpointed counter, always reset back to 0. This
+	 * avoids a situation where the counter overflows, aborts the
+	 * transaction, and is then rolled back to a value shortly
+	 * before the overflow, where it overflows and aborts again.
+	 */
+	if (unlikely(event_is_checkpointed(event))) {
+		/* No race with NMIs because the counter should not be armed */
+		wrmsrl(event->hw.event_base, 0);
+		local64_set(&event->hw.prev_count, 0);
+	}
 	return x86_perf_event_set_period(event);
 }
 
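Note: the counter MSR and hw.prev_count have to move together because the generic x86 code computes event deltas as the sign-extended difference between the current raw count and prev_count, so a bare wrmsrl() would corrupt the next delta. A toy model of that bookkeeping (standalone C, not kernel code; a 48-bit counter width is assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define CNT_BITS 48

    /* Mirrors the delta computation in x86_perf_event_update():
     * subtract, then sign-extend from the counter width. */
    static int64_t delta(uint64_t new_raw, uint64_t prev)
    {
            int shift = 64 - CNT_BITS;

            return ((int64_t)((new_raw - prev) << shift)) >> shift;
    }

    int main(void)
    {
            /* MSR reset to 0 but prev_count left stale at 5000: */
            printf("stale prev_count: %lld\n", (long long)delta(0, 5000));
            /* both reset to 0, then 100 new events: */
            printf("consistent:       %lld\n", (long long)delta(100, 0));
            return 0;
    }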
@@ -1372,6 +1388,13 @@ again:
 		x86_pmu.drain_pebs(regs);
 	}
 
+	/*
+	 * To avoid spurious interrupts with perf stat, always reset
+	 * checkpointed counters.
+	 */
+	if (cpuc->events[2] && event_is_checkpointed(cpuc->events[2]))
+		status |= (1ULL << 2);
+
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
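Note: the hard-coded bit 2 works because a checkpointed event can only ever be scheduled on general-purpose counter 2; the SDM permits IN_TXCP only in IA32_PERFEVTSEL2. Elsewhere in this series the scheduler is taught that constraint, roughly along these lines (a sketch, not part of this hunk):

    /* Force HSW_IN_TX_CHECKPOINTED events onto counter 2 only;
     * 0x4 is the counter bitmask (1 << 2). */
    static struct event_constraint counter2_constraint =
                            EVENT_CONSTRAINT(0, 0x4, 0);

    static struct event_constraint *
    hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
    {
            struct event_constraint *c = intel_get_event_constraints(cpuc, event);

            if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
                    if (c->idxmsk64 & (1U << 2))
                            return &counter2_constraint;
                    return &emptyconstraint;
            }
            return c;
    }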
@@ -1837,6 +1860,20 @@ static int hsw_hw_config(struct perf_event *event)
 	      event->attr.precise_ip > 0))
 		return -EOPNOTSUPP;
 
+	if (event_is_checkpointed(event)) {
+		/*
+		 * Sampling of checkpointed events can cause situations where
+		 * the CPU constantly aborts because of an overflow, which is
+		 * then checkpointed back and ignored. Forbid checkpointing
+		 * for sampling.
+		 *
+		 * But still allow a long sampling period, so that perf stat
+		 * from KVM works.
+		 */
+		if (event->attr.sample_period > 0 &&
+		    event->attr.sample_period < 0x7fffffff)
+			return -EOPNOTSUPP;
+	}
 	return 0;
 }
 
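Note: 0x7fffffff is INT_MAX, so any period that could plausibly be a real sampling period is refused, while period 0 (pure counting) and the huge periods KVM's vPMU programs for guest counters still pass. A hypothetical userspace check of that boundary (sketch only; event encoding as in the earlier example):

    #include <errno.h>
    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int try_period(unsigned long long period)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            attr.config = 0x3cULL | (1ULL << 32) | (1ULL << 33);
            attr.sample_period = period;

            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }

    int main(void)
    {
            if (try_period(100000) < 0)     /* sampling: expect EOPNOTSUPP */
                    printf("short period: %s\n", strerror(errno));
            if (try_period(0x7fffffffULL) >= 0)     /* counting in disguise */
                    printf("long period accepted\n");
            return 0;
    }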