@@ -25,7 +25,7 @@ struct intel_percore {
 /*
  * Intel PerfMon, used on Core and later.
  */
-static const u64 intel_perfmon_event_map[] =
+static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
 {
   [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
   [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
@@ -933,6 +933,16 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
+	/*
+	 * Some chipsets need to unmask the LVTPC in a particular spot
+	 * inside the nmi handler. As a result, the unmasking was pushed
+	 * into all the nmi handlers.
+	 *
+	 * This handler doesn't seem to have any issues with the unmasking
+	 * so it was left at the top.
+	 */
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+
 	intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
@@ -998,6 +1008,9 @@ intel_bts_constraints(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	unsigned int hw_event, bts_event;
 
+	if (event->attr.freq)
+		return NULL;
+
 	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
 	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
@@ -1305,7 +1318,7 @@ static void intel_clovertown_quirks(void)
 	 * AJ106 could possibly be worked around by not allowing LBR
 	 *   usage from PEBS, including the fixup.
 	 * AJ68  could possibly be worked around by always programming
-	 * 	 a pebs_event_reset[0] value and coping with the lost events.
+	 *	 a pebs_event_reset[0] value and coping with the lost events.
 	 *
 	 * But taken together it might just make sense to not enable PEBS on
 	 * these chips.
@@ -1409,6 +1422,18 @@ static __init int intel_pmu_init(void)
 		x86_pmu.percore_constraints = intel_nehalem_percore_constraints;
 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
+
+		if (ebx & 0x40) {
+			/*
+			 * Erratum AAJ80 detected, we work it around by using
+			 * the BR_MISP_EXEC.ANY event. This will over-count
+			 * branch-misses, but it's still much better than the
+			 * architectural event which is often completely bogus:
+			 */
+			intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
+
+			pr_cont("erratum AAJ80 worked around, ");
+		}
 		pr_cont("Nehalem events, ");
 		break;
 