@@ -1689,6 +1689,40 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
 	perf_pmu_enable(pmu);
 	return 0;
 }
+/*
+ * A fake_cpuc is used to validate event groups. Due to
+ * the extra reg logic, we also need to allocate a fake
+ * per_core and per_cpu structure. Otherwise, group events
+ * using an extra reg may conflict without the kernel being
+ * able to catch the conflict when the last event gets
+ * added to the group.
+ */
+static void free_fake_cpuc(struct cpu_hw_events *cpuc)
+{
+	kfree(cpuc->shared_regs);
+	kfree(cpuc);
+}
+
+static struct cpu_hw_events *allocate_fake_cpuc(void)
+{
+	struct cpu_hw_events *cpuc;
+	int cpu = raw_smp_processor_id();
+
+	cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
+	if (!cpuc)
+		return ERR_PTR(-ENOMEM);
+
+	/* only needed if we have extra_regs */
+	if (x86_pmu.extra_regs) {
+		cpuc->shared_regs = allocate_shared_regs(cpu);
+		if (!cpuc->shared_regs)
+			goto error;
+	}
+	return cpuc;
+error:
+	free_fake_cpuc(cpuc);
+	return ERR_PTR(-ENOMEM);
+}
 
 /*
  * validate that we can schedule this event
@@ -1699,9 +1733,9 @@ static int validate_event(struct perf_event *event)
 	struct event_constraint *c;
 	int ret = 0;
 
-	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
-	if (!fake_cpuc)
-		return -ENOMEM;
+	fake_cpuc = allocate_fake_cpuc();
+	if (IS_ERR(fake_cpuc))
+		return PTR_ERR(fake_cpuc);
 
 	c = x86_pmu.get_event_constraints(fake_cpuc, event);
 
@@ -1711,7 +1745,7 @@ static int validate_event(struct perf_event *event)
 	if (x86_pmu.put_event_constraints)
 		x86_pmu.put_event_constraints(fake_cpuc, event);
 
-	kfree(fake_cpuc);
+	free_fake_cpuc(fake_cpuc);
 
 	return ret;
 }
@@ -1731,35 +1765,32 @@ static int validate_group(struct perf_event *event)
 {
 	struct perf_event *leader = event->group_leader;
 	struct cpu_hw_events *fake_cpuc;
-	int ret, n;
+	int ret = -ENOSPC, n;
 
-	ret = -ENOMEM;
-	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
-	if (!fake_cpuc)
-		goto out;
+	fake_cpuc = allocate_fake_cpuc();
+	if (IS_ERR(fake_cpuc))
+		return PTR_ERR(fake_cpuc);
 	/*
 	 * the event is not yet connected with its
 	 * siblings therefore we must first collect
 	 * existing siblings, then add the new event
 	 * before we can simulate the scheduling
 	 */
-	ret = -ENOSPC;
 	n = collect_events(fake_cpuc, leader, true);
 	if (n < 0)
-		goto out_free;
+		goto out;
 
 	fake_cpuc->n_events = n;
 	n = collect_events(fake_cpuc, event, false);
 	if (n < 0)
-		goto out_free;
+		goto out;
 
 	fake_cpuc->n_events = n;
 
 	ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
 
-out_free:
-	kfree(fake_cpuc);
 out:
+	free_fake_cpuc(fake_cpuc);
 	return ret;
 }
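
For readers skimming the patch, what validate_group() simulates is worth spelling out: each x86 event carries a constraint describing which hardware counters it may occupy, and a group is valid only if every member can be placed on a distinct counter at the same time. The standalone C sketch below is a toy model of that check, not kernel code: the real work happens in x86_pmu.schedule_events() and the constraint machinery, and the names here (can_schedule, NUM_COUNTERS, the sample bitmasks) are invented for illustration.

/*
 * Toy model of the group-validation idea: every event has a bitmask of
 * counters it is allowed to use; the group schedules iff the events can
 * be matched to distinct counters. Backtracking suffices at this scale.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 4

/* Try to place events[i..n) on free counters; 'used' is a bitmask. */
static bool can_schedule(const unsigned *constraints, int n, int i,
			 unsigned used)
{
	if (i == n)
		return true;
	for (int c = 0; c < NUM_COUNTERS; c++) {
		unsigned bit = 1u << c;
		if ((constraints[i] & bit) && !(used & bit) &&
		    can_schedule(constraints, n, i + 1, used | bit))
			return true;
	}
	return false;
}

int main(void)
{
	/* Two events both pinned to counter 0 can never run together. */
	unsigned conflicting[] = { 0x1, 0x1 };
	/* A pinned event plus an unconstrained one schedules fine. */
	unsigned ok[] = { 0x1, 0xf };

	printf("conflicting group: %s\n",
	       can_schedule(conflicting, 2, 0, 0) ? "valid" : "-ENOSPC");
	printf("ok group: %s\n",
	       can_schedule(ok, 2, 0, 0) ? "valid" : "-ENOSPC");
	return 0;
}

This is also why the patch grows a fake shared_regs allocation: extra-reg conflicts are one more way two group members can demand the same exclusive resource, and without the fake structure the simulated schedule could succeed where the real one would not.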
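The allocation side of the patch is an instance of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention from include/linux/err.h: instead of returning NULL and forcing callers to guess the errno, allocate_fake_cpuc() encodes a negative errno in the returned pointer. Below is a minimal userspace sketch of that convention; the helpers are simplified from their kernel definitions (no unlikely() hint), and fake_ctx/allocate_ctx are invented stand-ins, not names from this patch.

/*
 * Userspace sketch of ERR_PTR/IS_ERR/PTR_ERR: errnos live in the top
 * MAX_ERRNO values of the address space, which no valid pointer uses.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct fake_ctx {
	int dummy;
};

/* Allocator in the style of allocate_fake_cpuc(): pointer or errno. */
static struct fake_ctx *allocate_ctx(int simulate_failure)
{
	struct fake_ctx *ctx;

	if (simulate_failure)
		return ERR_PTR(-ENOMEM);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return ERR_PTR(-ENOMEM);
	return ctx;
}

int main(void)
{
	struct fake_ctx *ctx = allocate_ctx(1);

	/* The caller pattern used by validate_event()/validate_group(). */
	if (IS_ERR(ctx)) {
		printf("allocation failed: %ld\n", PTR_ERR(ctx));
		return 1;
	}
	free(ctx);
	return 0;
}

Folding the errno into the pointer keeps every call site down to a single IS_ERR() test, which is exactly why validate_event() and validate_group() above can simply return PTR_ERR(fake_cpuc) on failure.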