@@ -100,8 +100,6 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 	/* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
 	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
-	INTEL_EVENT_CONSTRAINT(0xb7, 0x1), /* OFF_CORE_RESPONSE_0 */
-	INTEL_EVENT_CONSTRAINT(0xbb, 0x8), /* OFF_CORE_RESPONSE_1 */
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	EVENT_CONSTRAINT_END
@@ -122,6 +120,12 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 	EVENT_CONSTRAINT_END
 };
 
+static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
+	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+	EVENT_EXTRA_END
+};
+
 static u64 intel_pmu_event_map(int hw_event)
 {
 	return intel_perfmon_event_map[hw_event];
@@ -1260,7 +1264,7 @@ static void intel_pmu_cpu_starting(int cpu)
 	 */
 	intel_pmu_lbr_reset();
 
-	if (!cpuc->shared_regs)
+	if (!cpuc->shared_regs || x86_pmu.regs_no_ht_sharing)
 		return;
 
 	for_each_cpu(i, topology_thread_cpumask(cpu)) {
@@ -1502,6 +1506,9 @@ static __init int intel_pmu_init(void)
 
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_events;
+		x86_pmu.extra_regs = intel_snb_extra_regs;
+		/* all extra regs are per-cpu when HT is on */
+		x86_pmu.regs_no_ht_sharing = true;
 
 		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x180010e;
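
For reference, a minimal userspace sketch (not part of the patch) of how an offcore-response event is expected to be programmed once this support is in place: the event select 0x01b7 (OFFCORE_RESPONSE_0, event 0xb7 with umask 0x01) goes into attr.config as usual, and the request/response mask that the kernel writes into MSR_OFFCORE_RSP_0 is passed via attr.config1, following the existing Nehalem/Westmere offcore support. The mask value used below is a made-up placeholder, not a recommended setting.

/*
 * Illustrative sketch only: count offcore responses for the calling
 * thread.  Assumes a kernel with offcore_response support; the mask
 * in config1 is a placeholder value.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.size    = sizeof(attr);
	attr.type    = PERF_TYPE_RAW;
	attr.config  = 0x01b7;		/* event 0xb7, umask 0x01: OFFCORE_RESPONSE_0 */
	attr.config1 = 0x10003ULL;	/* placeholder mask written to MSR_OFFCORE_RSP_0 */

	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... run the workload of interest ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %lld\n", count);
	return 0;
}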