@@ -691,6 +691,8 @@ group_sched_in(struct perf_event *group_event,
 {
 	struct perf_event *event, *partial_group = NULL;
 	struct pmu *pmu = group_event->pmu;
+	u64 now = ctx->time;
+	bool simulate = false;
 
 	if (group_event->state == PERF_EVENT_STATE_OFF)
 		return 0;
@@ -719,11 +721,27 @@ group_error:
 	/*
 	 * Groups can be scheduled in as one unit only, so undo any
 	 * partial group before returning:
+	 * The events up to the failed event are scheduled out normally,
+	 * tstamp_stopped will be updated.
+	 *
+	 * The failed events and the remaining siblings need to have
+	 * their timings updated as if they had gone thru event_sched_in()
+	 * and event_sched_out(). This is required to get consistent timings
+	 * across the group. This also takes care of the case where the group
+	 * could never be scheduled by ensuring tstamp_stopped is set to mark
+	 * the time the event was actually stopped, such that time delta
+	 * calculation in update_event_times() is correct.
 	 */
 	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
 		if (event == partial_group)
-			break;
-		event_sched_out(event, cpuctx, ctx);
+			simulate = true;
+
+		if (simulate) {
+			event->tstamp_running += now - event->tstamp_stopped;
+			event->tstamp_stopped = now;
+		} else {
+			event_sched_out(event, cpuctx, ctx);
+		}
 	}
 	event_sched_out(group_event, cpuctx, ctx);
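
The arithmetic in the simulate branch keeps an event's accumulated running time unchanged while moving its stop timestamp forward to ctx->time, so update_event_times() sees a consistent delta. A minimal standalone C sketch of that bookkeeping follows; the struct and function names are illustrative only, not the kernel's API:

/*
 * Minimal sketch of the "simulate" timing update in the patch above.
 * In this simplified model, total running time is
 * (tstamp_stopped - tstamp_running) once an event is stopped; advancing
 * both fields by the same stopped interval keeps that delta constant
 * while marking the event as stopped "now".  Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_event {
	uint64_t tstamp_running;  /* base timestamp; running time = stopped - running */
	uint64_t tstamp_stopped;  /* time the event was last stopped */
};

static void simulate_sched_in_out(struct demo_event *ev, uint64_t now)
{
	/* The same two statements as the patch's simulate branch. */
	ev->tstamp_running += now - ev->tstamp_stopped;
	ev->tstamp_stopped = now;
}

int main(void)
{
	struct demo_event ev = { .tstamp_running = 40, .tstamp_stopped = 100 };

	printf("before: running time = %llu\n",
	       (unsigned long long)(ev.tstamp_stopped - ev.tstamp_running)); /* 60 */

	/* Group failed to schedule at ctx->time == 150; pretend the event
	 * went through sched_in()/sched_out() without accruing extra time. */
	simulate_sched_in_out(&ev, 150);

	printf("after:  running time = %llu, stopped at %llu\n",
	       (unsigned long long)(ev.tstamp_stopped - ev.tstamp_running), /* still 60 */
	       (unsigned long long)ev.tstamp_stopped);                      /* 150 */
	return 0;
}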