@@ -188,19 +188,12 @@ static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 	return nb && nb->nb_id != -1;
 }
 
-static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
-				      struct perf_event *event)
+static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
+					   struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
 	struct amd_nb *nb = cpuc->amd_nb;
 	int i;
 
-	/*
-	 * only care about NB events
-	 */
-	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
-		return;
-
 	/*
 	 * need to scan whole list because event may not have
 	 * been assigned during scheduling
@@ -247,24 +240,19 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
  *
  * Given that resources are allocated (cmpxchg), they must be
  * eventually freed for others to use. This is accomplished by
- * calling amd_put_event_constraints().
+ * calling __amd_put_nb_event_constraints()
  *
  * Non NB events are not impacted by this restriction.
  */
 static struct event_constraint *
-amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
+			       struct event_constraint *c)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	struct amd_nb *nb = cpuc->amd_nb;
 	struct perf_event *old;
 	int idx, new = -1;
 
-	/*
-	 * if not NB event or no NB, then no constraints
-	 */
-	if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
-		return &unconstrained;
-
 	/*
 	 * detect if already present, if so reuse
 	 *
@@ -275,7 +263,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	 * because of successive calls to x86_schedule_events() from
 	 * hw_perf_group_sched_in() without hw_perf_enable()
 	 */
-	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
 		if (new == -1 || hwc->idx == idx)
 			/* assign free slot, prefer hwc->idx */
 			old = cmpxchg(nb->owners + idx, NULL, event);
@@ -391,6 +379,25 @@ static void amd_pmu_cpu_dead(int cpu)
 	}
 }
 
+static struct event_constraint *
+amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+	/*
+	 * if not NB event or no NB, then no constraints
+	 */
+	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
+		return &unconstrained;
+
+	return __amd_get_nb_event_constraints(cpuc, event, &unconstrained);
+}
+
+static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
+				      struct perf_event *event)
+{
+	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
+		__amd_put_nb_event_constraints(cpuc, event);
+}
+
 PMU_FORMAT_ATTR(event, "config:0-7,32-35");
 PMU_FORMAT_ATTR(umask, "config:8-15"   );
 PMU_FORMAT_ATTR(edge,  "config:18"     );
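
For readers outside the kernel tree, the ownership scheme this patch refactors comes down to claiming a shared north-bridge counter slot with a compare-and-swap on an owners[] array and releasing it by swapping the pointer back to NULL. The standalone userspace sketch below illustrates only that pattern with C11 atomics; struct fake_event, claim_slot(), release_slot() and NUM_COUNTERS are made-up names for illustration and are not part of the patch or of the kernel API. It also omits the "prefer the event's previous counter and reuse an already-owned slot" logic that the real __amd_get_nb_event_constraints() implements.

/*
 * Userspace sketch (not kernel code) of the cmpxchg-based counter
 * ownership used by __amd_get_nb_event_constraints() and released by
 * __amd_put_nb_event_constraints().  All names here are invented.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_COUNTERS 4

struct fake_event { int id; };

/* one owner pointer per shared counter; NULL means "free" */
static _Atomic(struct fake_event *) owners[NUM_COUNTERS];

/*
 * Try to claim a free counter slot for @ev; return its index or -1 if
 * every slot is busy.  The kernel version additionally prefers hwc->idx
 * and reuses a slot the event already owns; this sketch leaves that out.
 */
static int claim_slot(struct fake_event *ev)
{
	int idx;

	for (idx = 0; idx < NUM_COUNTERS; idx++) {
		struct fake_event *expected = NULL;

		/* analogous to: old = cmpxchg(nb->owners + idx, NULL, event) */
		if (atomic_compare_exchange_strong(&owners[idx], &expected, ev))
			return idx;	/* we now own this counter */
	}
	return -1;			/* no free counter */
}

/*
 * Release whichever slots @ev holds.  Like the kernel code, scan the whole
 * list because the event may never have been assigned during scheduling.
 */
static void release_slot(struct fake_event *ev)
{
	int idx;

	for (idx = 0; idx < NUM_COUNTERS; idx++) {
		struct fake_event *expected = ev;

		/* only resets slots whose owner really is @ev */
		atomic_compare_exchange_strong(&owners[idx], &expected, NULL);
	}
}

int main(void)
{
	struct fake_event a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	printf("a -> counter %d\n", claim_slot(&a));	/* 0 */
	printf("b -> counter %d\n", claim_slot(&b));	/* 1 */
	release_slot(&a);				/* counter 0 is free again */
	printf("c -> counter %d\n", claim_slot(&c));	/* 0 */
	return 0;
}

Because the compare-and-swap only succeeds while a slot is still NULL, concurrent schedulers on cores sharing the same north bridge cannot hand the same counter to two events, which is why the patch's comment insists that resources allocated via cmpxchg must eventually be freed by calling __amd_put_nb_event_constraints().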