@@ -1336,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
 	intel_put_shared_regs_event_constraints(cpuc, event);
 }
 
-static int intel_pmu_hw_config(struct perf_event *event)
+static void intel_pebs_aliases_core2(struct perf_event *event)
 {
-	int ret = x86_pmu_hw_config(event);
-
-	if (ret)
-		return ret;
-
-	if (event->attr.precise_ip &&
-	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
 		/*
 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
 		 * (0x003c) so that we can use it with PEBS.
@@ -1365,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		 */
 		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
 
+		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
+		event->hw.config = alt_config;
+	}
+}
+
+static void intel_pebs_aliases_snb(struct perf_event *event)
+{
+	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+		/*
+		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
+		 * (0x003c) so that we can use it with PEBS.
+		 *
+		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
+		 * PEBS capable. However we can use UOPS_RETIRED.ALL
+		 * (0x01c2), which is a PEBS capable event, to get the same
+		 * count.
+		 *
+		 * UOPS_RETIRED.ALL counts the number of cycles that retires
+		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
+		 * larger than the maximum number of micro-ops that can be
+		 * retired per cycle (4) and then inverting the condition, we
+		 * count all cycles that retire 16 or less micro-ops, which
+		 * is every cycle.
+		 *
+		 * Thereby we gain a PEBS capable cycle counter.
+		 */
+		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
+
 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
 		event->hw.config = alt_config;
 	}
+}
+
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
+		x86_pmu.pebs_aliases(event);
 
 	if (intel_pmu_needs_lbr_smpl(event)) {
 		ret = intel_pmu_setup_lbr_filter(event);
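For reference, the two aliases boil down to fixed raw encodings. The following standalone sketch is not part of the patch; it just mirrors the architectural perfmon layout that X86_CONFIG fills in, assuming the event code sits in bits 0-7, the umask in bits 8-15, INV at bit 23 and CMASK in bits 24-31:

	#include <stdio.h>
	#include <stdint.h>

	/* Architectural perfmon fields as X86_CONFIG lays them out (assumed). */
	static uint64_t raw_config(uint64_t event, uint64_t umask,
				   uint64_t inv, uint64_t cmask)
	{
		return event | (umask << 8) | (inv << 23) | (cmask << 24);
	}

	int main(void)
	{
		/* core2 alias: INST_RETIRED.ANY_P with inv=1, cmask=16 */
		printf("core2 alias: %#llx\n",
		       (unsigned long long)raw_config(0xc0, 0x00, 1, 16));
		/* snb alias: UOPS_RETIRED.ALL with inv=1, cmask=16 */
		printf("snb alias:   %#llx\n",
		       (unsigned long long)raw_config(0xc2, 0x01, 1, 16));
		return 0;
	}

This prints 0x108000c0 and 0x108001c2: the encodings the PMU would be programmed with in place of the plain 0x003c cycles event.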
@@ -1643,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.max_period		= (1ULL << 31) - 1,
 	.get_event_constraints	= intel_get_event_constraints,
 	.put_event_constraints	= intel_put_event_constraints,
+	.pebs_aliases		= intel_pebs_aliases_core2,
 
 	.format_attrs		= intel_arch3_formats_attr,
 
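The .pebs_aliases initializer above targets a new function pointer in struct x86_pmu. The corresponding header hunk is not part of this excerpt, but the member presumably looks along these lines:

	/*
	 * Sketch of the presumed struct x86_pmu member (the header hunk is
	 * not shown here): a per-PMU hook that rewrites a PEBS-incapable
	 * event encoding into an equivalent PEBS-capable one.
	 */
	void		(*pebs_aliases)(struct perf_event *event);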
@@ -1885,6 +1918,7 @@ __init int intel_pmu_init(void)
 
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
 		x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
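With the SNB alias wired up, any precise cycles request takes the rewrite path: asking for event 0x003c with precise_ip set is enough, e.g. "perf record -e cycles:pp". A minimal user-space sketch of the same request (not part of the patch; assumes a Linux system with perf_event_open(2)):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		struct perf_event_attr attr;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_RAW;
		attr.config = 0x003c;	/* CPU_CLK_UNHALTED.THREAD_P */
		attr.sample_period = 100003;
		attr.precise_ip = 2;	/* request PEBS; hw_config applies the alias */

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}
		/* mmap a ring buffer and consume samples here */
		close(fd);
		return 0;
	}

On pre-patch kernels this open would fail for SNB, since 0x003c has no PEBS-capable encoding of its own; with the alias in place the kernel silently substitutes the UOPS_RETIRED.ALL-based counter.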