@@ -1185,15 +1185,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	/*
-	 * Some chipsets need to unmask the LVTPC in a particular spot
-	 * inside the nmi handler. As a result, the unmasking was pushed
-	 * into all the nmi handlers.
-	 *
-	 * This handler doesn't seem to have any issues with the unmasking
-	 * so it was left at the top.
+	 * No known reason to not always do late ACK,
+	 * but just in case do it opt-in.
 	 */
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-
+	if (!x86_pmu.late_ack)
+		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
@@ -1257,6 +1253,13 @@ again:
 
 done:
 	intel_pmu_enable_all(0);
+	/*
+	 * Only unmask the NMI after the overflow counters
+	 * have been reset. This avoids spurious NMIs on
+	 * Haswell CPUs.
+	 */
+	if (x86_pmu.late_ack)
+		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	return handled;
 }
 
@@ -2260,6 +2263,7 @@ __init int intel_pmu_init(void)
 	case 70:
 	case 71:
 	case 63:
+		x86_pmu.late_ack = true;
+		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 	memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
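
The ordering in the second hunk is the point of the patch: per its comment, unmasking the LVTPC while an overflowed counter is still pending can retrigger a spurious NMI on Haswell, so the ACK is moved after the counter reset, opt-in via x86_pmu.late_ack. Below is a minimal userspace C sketch of that early- vs. late-ACK ordering; all names here (pmu_late_ack, lvtpc_unmask, and the pmu_* stubs) are hypothetical stand-ins for x86_pmu.late_ack, apic_write(APIC_LVTPC, APIC_DM_NMI), and the real handler steps, not kernel API.

#include <stdbool.h>
#include <stdio.h>

static bool pmu_late_ack;		/* stand-in for x86_pmu.late_ack */

static void lvtpc_unmask(void)		{ puts("unmask LVTPC (NMI can fire again)"); }
static void pmu_disable_all(void)	{ puts("disable all counters"); }
static void pmu_drain_and_reset(void)	{ puts("drain status, reset overflowed counters"); }
static void pmu_enable_all(void)	{ puts("re-enable counters"); }

static int pmi_handler(void)
{
	/* Early ACK: fine on most CPUs, done before touching counters. */
	if (!pmu_late_ack)
		lvtpc_unmask();

	pmu_disable_all();
	pmu_drain_and_reset();
	pmu_enable_all();

	/*
	 * Late ACK: unmask only after the overflowed counters have been
	 * reset, so a still-pending overflow cannot immediately retrigger
	 * a spurious NMI (the Haswell case the patch opts into).
	 */
	if (pmu_late_ack)
		lvtpc_unmask();

	return 1;	/* handled */
}

int main(void)
{
	pmu_late_ack = true;	/* as set for models 60/63/70/71 above */
	pmi_handler();
	return 0;
}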