@@ -77,6 +77,9 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
 #define MMCR0_PMCjCE		MMCR0_PMCnCE
 #define MMCR0_FC56		0
 #define MMCR0_PMAO		0
+#define MMCR0_EBE		0
+#define MMCR0_PMCC		0
+#define MMCR0_PMCC_U6		0
 
 #define SPRN_MMCRA		SPRN_MMCR2
 #define MMCRA_SAMPLE_ENABLE	0
@@ -104,6 +107,15 @@ static inline int siar_valid(struct pt_regs *regs)
 	return 1;
 }
 
+static bool is_ebb_event(struct perf_event *event) { return false; }
+static int ebb_event_check(struct perf_event *event) { return 0; }
+static void ebb_event_add(struct perf_event *event) { }
+static void ebb_switch_out(unsigned long mmcr0) { }
+static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+{
+	return mmcr0;
+}
+
 static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
 static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
 void power_pmu_flush_branch_stack(void) {}
@@ -464,6 +476,89 @@ void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
 	return;
 }
 
+static bool is_ebb_event(struct perf_event *event)
+{
+	/*
+	 * This could be a per-PMU callback, but we'd rather avoid the cost. We
+	 * check that the PMU supports EBB, meaning those that don't can still
+	 * use bit 63 of the event code for something else if they wish.
+	 */
+	return (ppmu->flags & PPMU_EBB) &&
+	       ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1);
+}
+
+static int ebb_event_check(struct perf_event *event)
+{
+	struct perf_event *leader = event->group_leader;
+
+	/* Event and group leader must agree on EBB */
+	if (is_ebb_event(leader) != is_ebb_event(event))
+		return -EINVAL;
+
+	if (is_ebb_event(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			return -EINVAL;
+
+		if (!leader->attr.pinned || !leader->attr.exclusive)
+			return -EINVAL;
+
+		if (event->attr.inherit || event->attr.sample_period ||
+		    event->attr.enable_on_exec || event->attr.freq)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ebb_event_add(struct perf_event *event)
+{
+	if (!is_ebb_event(event) || current->thread.used_ebb)
+		return;
+
+	/*
+	 * IFF this is the first time we've added an EBB event, set
+	 * PMXE in the user MMCR0 so we can detect when it's cleared by
+	 * userspace. We need this so that we can context switch while
+	 * userspace is in the EBB handler (where PMXE is 0).
+	 */
+	current->thread.used_ebb = 1;
+	current->thread.mmcr0 |= MMCR0_PMXE;
+}
+
+static void ebb_switch_out(unsigned long mmcr0)
+{
+	if (!(mmcr0 & MMCR0_EBE))
+		return;
+
+	current->thread.siar = mfspr(SPRN_SIAR);
+	current->thread.sier = mfspr(SPRN_SIER);
+	current->thread.sdar = mfspr(SPRN_SDAR);
+	current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
+	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
+}
+
+static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+{
+	if (!ebb)
+		goto out;
+
+	/* Enable EBB and read/write to all 6 PMCs for userspace */
+	mmcr0 |= MMCR0_EBE | MMCR0_PMCC_U6;
+
+	/* Add any bits from the user reg, FC or PMAO */
+	mmcr0 |= current->thread.mmcr0;
+
+	/* Be careful not to set PMXE if userspace had it cleared */
+	if (!(current->thread.mmcr0 & MMCR0_PMXE))
+		mmcr0 &= ~MMCR0_PMXE;
+
+	mtspr(SPRN_SIAR, current->thread.siar);
+	mtspr(SPRN_SIER, current->thread.sier);
+	mtspr(SPRN_SDAR, current->thread.sdar);
+	mtspr(SPRN_MMCR2, current->thread.mmcr2);
+out:
+	return mmcr0;
+}
+
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
@@ -734,6 +829,13 @@ static void power_pmu_read(struct perf_event *event)
 
 	if (!event->hw.idx)
 		return;
+
+	if (is_ebb_event(event)) {
+		val = read_pmc(event->hw.idx);
+		local64_set(&event->hw.prev_count, val);
+		return;
+	}
+
 	/*
 	 * Performance monitor interrupts come even when interrupts
 	 * are soft-disabled, as long as interrupts are hard-enabled.
@@ -854,7 +956,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
 static void power_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
-	unsigned long flags, val;
+	unsigned long flags, mmcr0, val;
 
 	if (!ppmu)
 		return;
@@ -871,11 +973,11 @@ static void power_pmu_disable(struct pmu *pmu)
 	}
 
 	/*
-	 * Set the 'freeze counters' bit, clear PMAO/FC56.
+	 * Set the 'freeze counters' bit, clear EBE/PMCC/PMAO/FC56.
 	 */
-	val = mfspr(SPRN_MMCR0);
+	val = mmcr0 = mfspr(SPRN_MMCR0);
 	val |= MMCR0_FC;
-	val &= ~(MMCR0_PMAO | MMCR0_FC56);
+	val &= ~(MMCR0_EBE | MMCR0_PMCC | MMCR0_PMAO | MMCR0_FC56);
 
 	/*
 	 * The barrier is to make sure the mtspr has been
@@ -896,7 +998,10 @@ static void power_pmu_disable(struct pmu *pmu)
 
 		cpuhw->disabled = 1;
 		cpuhw->n_added = 0;
+
+		ebb_switch_out(mmcr0);
 	}
+
 	local_irq_restore(flags);
 }
 
@@ -911,15 +1016,15 @@ static void power_pmu_enable(struct pmu *pmu)
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
 	long i;
-	unsigned long val;
+	unsigned long val, mmcr0;
 	s64 left;
 	unsigned int hwc_index[MAX_HWEVENTS];
 	int n_lim;
 	int idx;
+	bool ebb;
 
 	if (!ppmu)
 		return;
-
 	local_irq_save(flags);
 
 	cpuhw = &__get_cpu_var(cpu_hw_events);
@@ -933,6 +1038,13 @@ static void power_pmu_enable(struct pmu *pmu)
 
 	cpuhw->disabled = 0;
 
+	/*
+	 * EBB requires an exclusive group and all events must have the EBB
+	 * flag set, or not set, so we can just check a single event. Also we
+	 * know we have at least one event.
+	 */
+	ebb = is_ebb_event(cpuhw->event[0]);
+
 	/*
 	 * If we didn't change anything, or only removed events,
 	 * no need to recalculate MMCR* settings and reset the PMCs.
@@ -1008,25 +1120,34 @@ static void power_pmu_enable(struct pmu *pmu)
 				++n_lim;
 			continue;
 		}
-		val = 0;
-		if (event->hw.sample_period) {
-			left = local64_read(&event->hw.period_left);
-			if (left < 0x80000000L)
-				val = 0x80000000L - left;
+
+		if (ebb)
+			val = local64_read(&event->hw.prev_count);
+		else {
+			val = 0;
+			if (event->hw.sample_period) {
+				left = local64_read(&event->hw.period_left);
+				if (left < 0x80000000L)
+					val = 0x80000000L - left;
+			}
+			local64_set(&event->hw.prev_count, val);
 		}
-		local64_set(&event->hw.prev_count, val);
+
 		event->hw.idx = idx;
 		if (event->hw.state & PERF_HES_STOPPED)
 			val = 0;
 		write_pmc(idx, val);
+
 		perf_event_update_userpage(event);
 	}
 	cpuhw->n_limited = n_lim;
 	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
 
 out_enable:
+	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
+
 	mb();
-	write_mmcr0(cpuhw, cpuhw->mmcr[0]);
+	write_mmcr0(cpuhw, mmcr0);
 
 	/*
 	 * Enable instruction sampling if necessary
@@ -1124,6 +1245,8 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
 	event->hw.config = cpuhw->events[n0];
 
 nocheck:
+	ebb_event_add(event);
+
 	++cpuhw->n_events;
 	++cpuhw->n_added;
 
@@ -1484,6 +1607,11 @@ static int power_pmu_event_init(struct perf_event *event)
 		}
 	}
 
+	/* Extra checks for EBB */
+	err = ebb_event_check(event);
+	if (err)
+		return err;
+
 	/*
 	 * If this is in a group, check if it can go on with all the
 	 * other hardware events in the group. We assume the event
@@ -1522,6 +1650,13 @@ static int power_pmu_event_init(struct perf_event *event)
 	event->hw.last_period = event->hw.sample_period;
 	local64_set(&event->hw.period_left, event->hw.last_period);
 
+	/*
+	 * For EBB events we just context switch the PMC value, we don't do any
+	 * of the sample_period logic. We use hw.prev_count for this.
+	 */
+	if (is_ebb_event(event))
+		local64_set(&event->hw.prev_count, 0);
+
 	/*
 	 * See if we need to reserve the PMU.
 	 * If no events are currently in use, then we have to take a
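
For reference, the constraints enforced by ebb_event_check() map directly onto how a userspace consumer would open an EBB event. The sketch below is illustrative only and not part of the patch: it assumes the EBB flag sits in bit 63 of the raw event code (as the is_ebb_event() comment indicates), and open_ebb_leader() plus its raw_code argument are hypothetical names.

/*
 * Illustrative only, not part of the patch: open an EBB event shaped to
 * pass ebb_event_check() -- per-task (pid 0, cpu -1), a pinned and
 * exclusive leader, and no inherit/sample_period/enable_on_exec/freq.
 * The EBB request bit is assumed to be bit 63 of the raw event code.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_ebb_leader(unsigned long long raw_code)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size      = sizeof(attr);
	attr.type      = PERF_TYPE_RAW;
	attr.config    = raw_code | (1ULL << 63);	/* request EBB delivery */
	attr.pinned    = 1;				/* leader must be pinned */
	attr.exclusive = 1;				/* and exclusive */

	/* pid == 0, cpu == -1: attach to the current task on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

A real consumer would additionally install its EBB handler (via the EBBHR/BESCR registers) before enabling the event with PERF_EVENT_IOC_ENABLE; that userspace side is outside the scope of this patch.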