@@ -870,12 +870,8 @@ static void perf_pmu_rotate_start(struct pmu *pmu)
 
 	WARN_ON(!irqs_disabled());
 
-	if (list_empty(&cpuctx->rotation_list)) {
-		int was_empty = list_empty(head);
+	if (list_empty(&cpuctx->rotation_list))
 		list_add(&cpuctx->rotation_list, head);
-		if (was_empty)
-			tick_nohz_full_kick();
-	}
 }
 
 static void get_ctx(struct perf_event_context *ctx)
@@ -1875,6 +1871,9 @@ static int __perf_install_in_context(void *info)
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
 
+	if (atomic_read(&__get_cpu_var(perf_freq_events)))
+		tick_nohz_full_kick();
+
 	return 0;
 }
 
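Note that perf_freq_events is not defined in these hunks; it is assumed to be a per-CPU atomic_t maintained by a companion change in this series, roughly along the lines of the sketch below. The helper name is hypothetical, not part of this patch.

static DEFINE_PER_CPU(atomic_t, perf_freq_events);

/* Hypothetical helper: bump the per-CPU count when a freq-based
 * sampling event is accounted to @cpu, drop it when unaccounted. */
static void account_freq_event_cpu(int cpu, int add)
{
	if (add)
		atomic_inc(&per_cpu(perf_freq_events, cpu));
	else
		atomic_dec(&per_cpu(perf_freq_events, cpu));
}
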
@@ -2812,10 +2811,11 @@ done:
 #ifdef CONFIG_NO_HZ_FULL
 bool perf_event_can_stop_tick(void)
 {
-	if (list_empty(&__get_cpu_var(rotation_list)))
-		return true;
-	else
+	if (atomic_read(&__get_cpu_var(perf_freq_events)) ||
+	    __this_cpu_read(perf_throttled_count))
 		return false;
+	else
+		return true;
 }
 #endif
 
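For context, perf_event_can_stop_tick() is the hook the full dynticks code polls before it is allowed to shut the tick down. A minimal sketch of the caller side, loosely modeled on can_stop_full_tick() in kernel/time/tick-sched.c; the exact set of surrounding checks is an assumption, not part of this patch.

static bool can_stop_full_tick(void)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (!sched_can_stop_tick())
		return false;

	/* Keep the tick while this CPU has freq events or is throttled. */
	if (!perf_event_can_stop_tick())
		return false;

	return true;
}
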
@@ -5202,6 +5202,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		__this_cpu_inc(perf_throttled_count);
 		hwc->interrupts = MAX_INTERRUPTS;
 		perf_log_throttle(event, 0);
+		tick_nohz_full_kick();
 		ret = 1;
 	}
 }
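The kick on throttle above pairs with the perf_throttled_count check in perf_event_can_stop_tick(): unthrottling is driven by the tick itself, so a throttled CPU must keep ticking until the count is consumed. A rough sketch of the assumed tick-side path, heavily simplified from perf_event_task_tick(); the details are an assumption.

void perf_event_task_tick(void)
{
	int throttled;

	/* Consume the throttle count; a later tick may then be stopped. */
	throttled = __this_cpu_xchg(perf_throttled_count, 0);

	/* ... unthrottle events and adjust sampling periods ... */
}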