@@ -1763,8 +1763,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 			  enum event_type_t event_type)
 {
 	struct perf_event *event;
+	int is_active = ctx->is_active;
 
-	ctx->is_active = 0;
+	ctx->is_active &= ~event_type;
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -1774,12 +1775,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		return;
 
 	perf_pmu_disable(ctx->pmu);
-	if (event_type & EVENT_PINNED) {
+	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
 
-	if (event_type & EVENT_FLEXIBLE) {
+	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
@@ -2058,8 +2059,9 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     struct task_struct *task)
 {
 	u64 now;
+	int is_active = ctx->is_active;
 
-	ctx->is_active = 1;
+	ctx->is_active |= event_type;
 	if (likely(!ctx->nr_events))
 		return;
 
@@ -2070,11 +2072,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 	 * First go through the list and put on any pinned groups
 	 * in order to give them the best chance of going on.
 	 */
-	if (event_type & EVENT_PINNED)
+	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		ctx_pinned_sched_in(ctx, cpuctx);
 
 	/* Then walk through the lower prio flexible groups */
-	if (event_type & EVENT_FLEXIBLE)
+	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
 		ctx_flexible_sched_in(ctx, cpuctx);
 }
 
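
Taken together, the hunks change ctx->is_active from a boolean into a bitmask of the event classes currently scheduled in: sched-out only unschedules classes that are actually on, and sched-in skips classes that already are, so partial transitions (for example taking only the flexible groups off around a rotation) compose correctly. What follows is a minimal standalone sketch of that bookkeeping, not kernel code: the enum values mirror the kernel's EVENT_FLEXIBLE/EVENT_PINNED bits, while struct ctx, the printf markers, and the scenario in main() are illustrative stand-ins.

/* Standalone sketch of the is_active bitmask bookkeeping (assumptions:
 * struct ctx and the printf markers stand in for the real context and
 * the group_sched_out()/ctx_*_sched_in() walks). */
#include <assert.h>
#include <stdio.h>

enum event_type_t {
	EVENT_FLEXIBLE	= 0x1,
	EVENT_PINNED	= 0x2,
	EVENT_ALL	= EVENT_FLEXIBLE | EVENT_PINNED,
};

struct ctx { int is_active; };

static void sched_out(struct ctx *ctx, enum event_type_t event_type)
{
	int is_active = ctx->is_active;

	ctx->is_active &= ~event_type;

	/* Only unschedule classes that were actually on. */
	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		printf("unscheduling pinned groups\n");
	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		printf("unscheduling flexible groups\n");
}

static void sched_in(struct ctx *ctx, enum event_type_t event_type)
{
	int is_active = ctx->is_active;

	ctx->is_active |= event_type;

	/* Only schedule classes that were not already on. */
	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		printf("scheduling pinned groups\n");
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		printf("scheduling flexible groups\n");
}

int main(void)
{
	struct ctx ctx = { 0 };

	sched_in(&ctx, EVENT_ALL);	 /* both classes go on */
	sched_out(&ctx, EVENT_FLEXIBLE); /* e.g. rotation: flexible off */
	sched_in(&ctx, EVENT_ALL);	 /* pinned already on, so only
					    flexible is rescheduled */
	assert(ctx.is_active == EVENT_ALL);
	return 0;
}

With the old boolean, the final sched_in(EVENT_ALL) would have re-walked the pinned groups even though they never went off; with the bitmask, each class transitions exactly once.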