|
@@ -82,6 +82,11 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+static inline int siar_valid(struct pt_regs *regs)
|
|
|
+{
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+
|
|
|
#endif /* CONFIG_PPC32 */
|
|
|
|
|
|
/*
|
|
@@ -106,14 +111,20 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
|
|
|
* If we're not doing instruction sampling, give them the SDAR
|
|
|
* (sampled data address). If we are doing instruction sampling, then
|
|
|
* only give them the SDAR if it corresponds to the instruction
|
|
|
- * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
|
|
|
- * bit in MMCRA.
|
|
|
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
|
|
|
+ * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
|
|
|
*/
|
|
|
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
|
|
|
{
|
|
|
unsigned long mmcra = regs->dsisr;
|
|
|
- unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
|
|
|
- POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
|
|
|
+ unsigned long sdsync;
|
|
|
+
|
|
|
+ if (ppmu->flags & PPMU_SIAR_VALID)
|
|
|
+ sdsync = POWER7P_MMCRA_SDAR_VALID;
|
|
|
+ else if (ppmu->flags & PPMU_ALT_SIPR)
|
|
|
+ sdsync = POWER6_MMCRA_SDSYNC;
|
|
|
+ else
|
|
|
+ sdsync = MMCRA_SDSYNC;
|
|
|
|
|
|
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
|
|
|
*addrp = mfspr(SPRN_SDAR);
|
|
@@ -230,6 +241,24 @@ static inline int perf_intr_is_nmi(struct pt_regs *regs)
|
|
|
return !regs->softe;
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * On processors like P7+ that have the SIAR-Valid bit, marked instructions
|
|
|
+ * must be sampled only if the SIAR-Valid bit is set.
|
|
|
+ *
|
|
|
+ * For unmarked instructions and for processors that don't have the SIAR-Valid
|
|
|
+ * bit, assume that SIAR is valid.
|
|
|
+ */
|
|
|
+static inline int siar_valid(struct pt_regs *regs)
|
|
|
+{
|
|
|
+ unsigned long mmcra = regs->dsisr;
|
|
|
+ int marked = mmcra & MMCRA_SAMPLE_ENABLE;
|
|
|
+
|
|
|
+ if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
|
|
|
+ return mmcra & POWER7P_MMCRA_SIAR_VALID;
|
|
|
+
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+
|
|
|
#endif /* CONFIG_PPC64 */
|
|
|
|
|
|
static void perf_event_interrupt(struct pt_regs *regs);
|
|
@@ -1291,6 +1320,7 @@ struct pmu power_pmu = {
|
|
|
.event_idx = power_pmu_event_idx,
|
|
|
};
|
|
|
|
|
|
+
|
|
|
/*
|
|
|
* A counter has overflowed; update its count and record
|
|
|
* things if requested. Note that interrupts are hard-disabled
|
|
@@ -1324,7 +1354,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
|
|
|
left += period;
|
|
|
if (left <= 0)
|
|
|
left = period;
|
|
|
- record = 1;
|
|
|
+ record = siar_valid(regs);
|
|
|
event->hw.last_period = event->hw.sample_period;
|
|
|
}
|
|
|
if (left < 0x80000000LL)
|
|
@@ -1374,8 +1404,10 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
|
|
|
{
|
|
|
unsigned long use_siar = regs->result;
|
|
|
|
|
|
- if (use_siar)
|
|
|
+ if (use_siar && siar_valid(regs))
|
|
|
return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
|
|
|
+ else if (use_siar)
|
|
|
+ return 0; /* no valid instruction pointer */
|
|
|
else
|
|
|
return regs->nip;
|
|
|
}
|