@@ -1524,12 +1524,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		 */
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(event, 1);
+			perf_disable();
 			event->pmu->unthrottle(event);
+			perf_enable();
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
+		perf_disable();
 		event->pmu->read(event);
 		now = atomic64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
@@ -1537,6 +1540,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
 		if (delta > 0)
 			perf_adjust_period(event, TICK_NSEC, delta);
+		perf_enable();
 	}
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1546,9 +1550,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
  */
 static void rotate_ctx(struct perf_event_context *ctx)
 {
-	if (!ctx->nr_events)
-		return;
-
 	raw_spin_lock(&ctx->lock);
 
 	/* Rotate the first entry last of non-pinned groups */
@@ -1561,19 +1562,28 @@ void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	int rotate = 0;
 
 	if (!atomic_read(&nr_events))
 		return;
 
 	cpuctx = &__get_cpu_var(perf_cpu_context);
-	ctx = curr->perf_event_ctxp;
+	if (cpuctx->ctx.nr_events &&
+	    cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
+		rotate = 1;
 
-	perf_disable();
+	ctx = curr->perf_event_ctxp;
+	if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
+		rotate = 1;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
 
+	if (!rotate)
+		return;
+
+	perf_disable();
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1585,7 +1595,6 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-
 	perf_enable();
 }
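
As an aside, the nr_events/nr_active test the last two hunks introduce is easy to model
in isolation: rotation only pays off when a context has events that could not all be
co-scheduled on the PMU at once; if every event is already active, the round-robin is a
no-op. The userspace sketch below is illustrative only — struct ctx_model and
needs_rotation() are hypothetical names for this example, and only the comparison itself
mirrors the patch.

	#include <stdio.h>

	/* Modeled loosely on struct perf_event_context: just the two
	 * counters the rotation check in perf_event_task_tick() reads. */
	struct ctx_model {
		int nr_events;	/* total events attached to the context */
		int nr_active;	/* events currently scheduled on the PMU */
	};

	/* Rotation is worthwhile only when events exist and not all of
	 * them fit on the hardware at the same time. */
	static int needs_rotation(const struct ctx_model *ctx)
	{
		return ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active;
	}

	int main(void)
	{
		struct ctx_model idle      = { .nr_events = 0, .nr_active = 0 };
		struct ctx_model all_fit   = { .nr_events = 2, .nr_active = 2 };
		struct ctx_model contended = { .nr_events = 4, .nr_active = 2 };

		printf("idle:      %d\n", needs_rotation(&idle));      /* 0 */
		printf("all_fit:   %d\n", needs_rotation(&all_fit));   /* 0 */
		printf("contended: %d\n", needs_rotation(&contended)); /* 1 */
		return 0;
	}

In the idle and all_fit cases the patched tick path returns before the
perf_disable()/sched_out/rotate/sched_in/perf_enable() sequence, which is the whole point
of the optimization: the common no-contention case no longer pays for a full reschedule.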