@@ -175,8 +175,8 @@ int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
 static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
 static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
 
-static atomic_t perf_sample_allowed_ns __read_mostly =
-	ATOMIC_INIT( DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100);
+static int perf_sample_allowed_ns __read_mostly =
+	DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
 
 void update_perf_cpu_limits(void)
 {
@@ -184,7 +184,7 @@ void update_perf_cpu_limits(void)
 
 	tmp *= sysctl_perf_cpu_time_max_percent;
 	do_div(tmp, 100);
-	atomic_set(&perf_sample_allowed_ns, tmp);
+	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
 }
 
 static int perf_rotate_context(struct perf_cpu_context *cpuctx);
@@ -228,14 +228,15 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
  * we detect that events are taking too long.
  */
 #define NR_ACCUMULATED_SAMPLES 128
-DEFINE_PER_CPU(u64, running_sample_length);
+static DEFINE_PER_CPU(u64, running_sample_length);
 
 void perf_sample_event_took(u64 sample_len_ns)
 {
 	u64 avg_local_sample_len;
 	u64 local_samples_len;
+	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
 
-	if (atomic_read(&perf_sample_allowed_ns) == 0)
+	if (allowed_ns == 0)
 		return;
 
 	/* decay the counter by 1 average sample */
@@ -251,7 +252,7 @@ void perf_sample_event_took(u64 sample_len_ns)
 	 */
 	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
 
-	if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns))
+	if (avg_local_sample_len <= allowed_ns)
 		return;
 
 	if (max_samples_per_tick <= 1)
@@ -262,10 +263,9 @@ void perf_sample_event_took(u64 sample_len_ns)
 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
 
 	printk_ratelimited(KERN_WARNING
-			"perf samples too long (%lld > %d), lowering "
+			"perf samples too long (%lld > %lld), lowering "
 			"kernel.perf_event_max_sample_rate to %d\n",
-			avg_local_sample_len,
-			atomic_read(&perf_sample_allowed_ns),
+			avg_local_sample_len, allowed_ns,
 			sysctl_perf_event_sample_rate);
 
 	update_perf_cpu_limits();
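
The change above follows a standard kernel idiom: perf_sample_allowed_ns is only ever stored and loaded whole, nothing ever does a read-modify-write on it, so atomic_t buys nothing over a plain int accessed through ACCESS_ONCE(), which merely stops the compiler from caching, tearing, or re-reading the value. Loading it once into the local allowed_ns also makes the zero-check, the comparison, and the printk all see the same value even if a sysctl write races in between. A minimal standalone sketch of the idiom follows; it is illustrative only, not kernel code: the ACCESS_ONCE() definition is reproduced from the kernel's, the variable and helper names are hypothetical, and GNU C (gcc/clang) is assumed for typeof.

/*
 * Standalone sketch of the plain-int-plus-ACCESS_ONCE() idiom.
 * ACCESS_ONCE() forces a single real load or store through a
 * volatile-qualified alias of the object; that is enough here
 * because no read-modify-write (atomic_inc, cmpxchg, ...) is
 * ever performed on the variable.
 */
#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static int sample_allowed_ns;	/* stands in for perf_sample_allowed_ns */

static void update_limit(int ns)	/* hypothetical helper */
{
	ACCESS_ONCE(sample_allowed_ns) = ns;	/* was atomic_set() */
}

static int read_limit(void)		/* hypothetical helper */
{
	return ACCESS_ONCE(sample_allowed_ns);	/* was atomic_read() */
}

int main(void)
{
	update_limit(250000);
	printf("allowed_ns = %d\n", read_limit());
	return 0;
}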