|
@@ -102,6 +102,7 @@ struct cpu_hw_events {
|
|
|
*/
|
|
|
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
|
|
|
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
|
|
+ unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
|
|
|
int enabled;
|
|
|
|
|
|
int n_events;
|
|
@@ -1010,6 +1011,7 @@ static int x86_pmu_start(struct perf_event *event)
|
|
|
x86_perf_event_set_period(event);
|
|
|
cpuc->events[idx] = event;
|
|
|
__set_bit(idx, cpuc->active_mask);
|
|
|
+ __set_bit(idx, cpuc->running);
|
|
|
x86_pmu.enable(event);
|
|
|
perf_event_update_userpage(event);
|
|
|
|
|
@@ -1141,8 +1143,16 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
|
|
|
cpuc = &__get_cpu_var(cpu_hw_events);
|
|
|
|
|
|
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
|
|
|
- if (!test_bit(idx, cpuc->active_mask))
|
|
|
+ if (!test_bit(idx, cpuc->active_mask)) {
|
|
|
+ /*
|
|
|
+ * Though we deactivated the counter some cpus
|
|
|
+ * might deliver spurious interrupts still
|
|
|
+ * in flight. Catch them:
|
|
|
+ */
|
|
|
+ if (__test_and_clear_bit(idx, cpuc->running))
|
|
|
+ handled++;
|
|
|
continue;
|
|
|
+ }
|
|
|
|
|
|
event = cpuc->events[idx];
|
|
|
hwc = &event->hw;
|