@@ -83,14 +83,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
 void __weak hw_perf_disable(void)		{ barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
-int __weak
-hw_perf_group_sched_in(struct perf_event *group_leader,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx)
-{
-	return 0;
-}
-
 void __weak perf_event_print_debug(void)	{ }
 
 static DEFINE_PER_CPU(int, perf_disable_count);
@@ -644,15 +636,20 @@ group_sched_in(struct perf_event *group_event,
 	       struct perf_cpu_context *cpuctx,
 	       struct perf_event_context *ctx)
 {
-	struct perf_event *event, *partial_group;
+	struct perf_event *event, *partial_group = NULL;
+	const struct pmu *pmu = group_event->pmu;
+	bool txn = false;
 	int ret;
 
 	if (group_event->state == PERF_EVENT_STATE_OFF)
 		return 0;
 
-	ret = hw_perf_group_sched_in(group_event, cpuctx, ctx);
-	if (ret)
-		return ret < 0 ? ret : 0;
+	/* Check if group transaction available */
+	if (pmu->start_txn)
+		txn = true;
+
+	if (txn)
+		pmu->start_txn(pmu);
 
 	if (event_sched_in(group_event, cpuctx, ctx))
 		return -EAGAIN;
@@ -667,9 +664,20 @@ group_sched_in(struct perf_event *group_event,
 		}
 	}
 
-	return 0;
+	if (!txn)
+		return 0;
+
+	ret = pmu->commit_txn(pmu);
+	if (!ret) {
+		pmu->cancel_txn(pmu);
+
+		return 0;
+	}
 
 group_error:
+	if (txn)
+		pmu->cancel_txn(pmu);
+
 	/*
 	 * Groups can be scheduled in as one unit only, so undo any
 	 * partial group before returning:
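
For readers tracing the new control flow in group_sched_in() above, here is a small stand-alone sketch (plain user-space C, not kernel code) that models the same idea: start_txn opens a group transaction, each group member is added tentatively, and commit_txn either accepts the whole group or the caller rolls everything back with cancel_txn. Only the three callback roles and the overall flow come from the hunks above; struct toy_pmu, its counter limit, and every toy_* name are invented for illustration.

/*
 * Illustrative user-space model of the start_txn/commit_txn/cancel_txn
 * pattern used above.  Not kernel code: struct toy_pmu and the toy_*
 * helpers are invented; only the three callback roles and the control
 * flow mirror the patched group_sched_in().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_pmu {
	int used;			/* counters tentatively claimed */
	int limit;			/* counters the "hardware" offers */
	bool in_txn;			/* transaction currently open */
	void (*start_txn)(struct toy_pmu *pmu);
	int  (*commit_txn)(struct toy_pmu *pmu);	/* 0 on success */
	void (*cancel_txn)(struct toy_pmu *pmu);
};

static void toy_start_txn(struct toy_pmu *pmu)  { pmu->in_txn = true; }
static void toy_cancel_txn(struct toy_pmu *pmu) { pmu->in_txn = false; }

static int toy_commit_txn(struct toy_pmu *pmu)
{
	/* Either the whole group fits, or none of it is accepted. */
	return pmu->used <= pmu->limit ? 0 : -1;
}

/* Mirrors the patched group_sched_in(): claim every member, then decide. */
static int toy_group_sched_in(struct toy_pmu *pmu, int nr_events)
{
	bool txn = pmu->start_txn != NULL;
	int before = pmu->used;

	if (txn)
		pmu->start_txn(pmu);

	pmu->used += nr_events;		/* tentative, like event_sched_in() */

	if (!txn)
		return 0;

	if (!pmu->commit_txn(pmu)) {
		/* Success: cancel only clears the in_txn flag here. */
		pmu->cancel_txn(pmu);
		return 0;
	}

	/* Commit failed: roll the partial group back. */
	pmu->used = before;
	pmu->cancel_txn(pmu);
	return -1;
}

int main(void)
{
	struct toy_pmu pmu = {
		.limit      = 4,
		.start_txn  = toy_start_txn,
		.commit_txn = toy_commit_txn,
		.cancel_txn = toy_cancel_txn,
	};

	printf("group of 3: %s\n", toy_group_sched_in(&pmu, 3) ? "rejected" : "scheduled");
	printf("group of 3: %s\n", toy_group_sched_in(&pmu, 3) ? "rejected" : "scheduled");
	return 0;
}

The deliberate quirk of calling cancel_txn even after a successful commit matches the hunk above, where cancel only has to clear the transaction state.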