@@ -2303,7 +2303,7 @@ do { \
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		return;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		/*
 		 * restart the event
 		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping
+		 * it twice.
 		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -4562,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*
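
For readers outside the kernel tree, the sketch below is a minimal, standalone C model (user-space, not kernel code) of the calling convention the patch introduces: perf_adjust_period() now performs its own pmu->stop()/pmu->start() only when the caller has not already stopped the event. Everything named fake_* or adjust_period() here is invented for illustration; only the meaning of the disable flag and the period_left > 8*sample_period check are taken from the hunks above.

/*
 * Toy model, NOT kernel code: fake_event, fake_stop() and fake_start()
 * stand in for a perf event and its pmu->stop()/pmu->start() callbacks.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_event {
	long long period_left;
	long long sample_period;
	bool running;
};

static void fake_stop(struct fake_event *e)  { e->running = false; puts("pmu stop");  }
static void fake_start(struct fake_event *e) { e->running = true;  puts("pmu start"); }

/*
 * Models perf_adjust_period() after the patch: 'disable' tells the
 * function whether it must stop/start the event itself, or whether the
 * caller already stopped it and will restart it later.
 */
static void adjust_period(struct fake_event *e, long long new_period, bool disable)
{
	e->sample_period = new_period;

	if (e->period_left > 8 * new_period) {
		if (disable)
			fake_stop(e);	/* only when the event is still live */

		e->period_left = 0;	/* drop the stale residual period */

		if (disable)
			fake_start(e);	/* reload with the new period */
	}
}

int main(void)
{
	struct fake_event ev = { .period_left = 1 << 20, .running = true };

	/* Overflow path (__perf_event_overflow): the event is running, so
	 * adjust_period() must do its own stop/start -> disable == true. */
	adjust_period(&ev, 1000, true);

	/* Unthrottle path (perf_adjust_freq_unthr_context): the caller has
	 * already stopped the event and restarts it afterwards, so it
	 * passes false to avoid a second stop and a premature start. */
	fake_stop(&ev);
	ev.period_left = 1 << 20;
	adjust_period(&ev, 1000, false);
	fake_start(&ev);

	return 0;
}

A brief note on the design: in the unthrottle pass the caller stops each event before reading its count and restarts it afterwards, so letting perf_adjust_period() stop/start as well would double up those callbacks; the new flag keeps the reset of period_left while leaving the stop/start sequencing to the caller. The perf_pmu_disable()/perf_pmu_enable() pair added around the event loop complements this by keeping the PMU quiescent for the whole pass over the context rather than toggling it per event.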