
perf/x86: Don't assume there can be only 4 PEBS events

On Sandy Bridge in non-HT mode there are 8 counters available.
Since every counter can write a PEBS record, assuming a maximum
of 4 is incorrect. Use the reported counter count -- capped at an
upper limit for the static array -- instead.
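
In effect the PEBS limit becomes the smaller of the static array
size and the counter count the hardware reports. A minimal
standalone sketch of that clamp (pebs_event_limit() is a
hypothetical helper for illustration, not a kernel function):

    #include <stdio.h>

    #define MAX_PEBS_EVENTS 8   /* static array bound after this patch */

    /* Hypothetical helper mirroring the min_t() clamp in the patch. */
    static unsigned int pebs_event_limit(unsigned int num_counters)
    {
            return num_counters < MAX_PEBS_EVENTS ?
                    num_counters : MAX_PEBS_EVENTS;
    }

    int main(void)
    {
            /* Sandy Bridge non-HT: 8 counters -> 8; with HT: 4 -> 4. */
            printf("%u %u\n", pebs_event_limit(8), pebs_event_limit(4));
            return 0;
    }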

Also, I made the warning messages a bit more informative.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1338944211-28275-2-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Andi Kleen · 13 years ago · commit 70ab7003de

+ 2 - 1
arch/x86/kernel/cpu/perf_event.h

@@ -69,7 +69,7 @@ struct amd_nb {
 };
 
 /* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS		4
+#define MAX_PEBS_EVENTS		8
 
 /*
  * A debug store configuration.
@@ -378,6 +378,7 @@ struct x86_pmu {
 	void		(*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
 	void		(*pebs_aliases)(struct perf_event *event);
+	int 		max_pebs_events;
 
 	/*
 	 * Intel LBR

+ 2 - 0
arch/x86/kernel/cpu/perf_event_intel.c

@@ -1800,6 +1800,8 @@ __init int intel_pmu_init(void)
 	x86_pmu.events_maskl		= ebx.full;
 	x86_pmu.events_mask_len		= eax.split.mask_length;
 
+	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events:
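
Note: the min_t(unsigned, ...) form casts both arguments to the
given type before comparing, so the clamp avoids a signed/unsigned
mismatch even though x86_pmu.num_counters is a plain int.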

+ 4 - 4
arch/x86/kernel/cpu/perf_event_intel_ds.c

@@ -620,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > 1);
+	WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
 	at += n - 1;
 
 	__intel_pmu_pebs_event(event, iregs, at);
@@ -651,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+	WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
 
 	for ( ; at < top; at++) {
-		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+		for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
 			event = cpuc->events[bit];
 			if (!test_bit(bit, cpuc->active_mask))
 				continue;
@@ -670,7 +670,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 			break;
 		}
 
-		if (!event || bit >= MAX_PEBS_EVENTS)
+		if (!event || bit >= x86_pmu.max_pebs_events)
 			continue;
 
 		__intel_pmu_pebs_event(event, iregs, at);
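
The drain loop walks the status bitmap of each PEBS record and, with
this patch, stops at the per-PMU limit instead of the old constant.
A plain-C sketch of that walk (decode_status() is illustrative and
stands in for the kernel's for_each_set_bit() iteration):

    #include <stdio.h>

    /*
     * Walk the set bits of a PEBS record's status word, never looking
     * past the per-PMU counter limit -- the same bound the patched
     * for_each_set_bit() call uses.
     */
    static void decode_status(unsigned long status,
                              unsigned int max_pebs_events)
    {
            unsigned int bit;

            for (bit = 0; bit < max_pebs_events; bit++) {
                    if (status & (1UL << bit))
                            printf("PEBS record for counter %u\n", bit);
            }
    }

    int main(void)
    {
            /* Counters 0, 2 and 5 reported; limit 8 on non-HT Sandy Bridge. */
            decode_status(0x25UL, 8);
            return 0;
    }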