@@ -162,15 +162,15 @@ static void fsl_emb_pmu_read(struct perf_event *event)
 	 * Therefore we treat them like NMIs.
 	 */
 	do {
-		prev = atomic64_read(&event->hw.prev_count);
+		prev = local64_read(&event->hw.prev_count);
 		barrier();
 		val = read_pmc(event->hw.idx);
-	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
 	/* The counters are only 32 bits wide */
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &event->count);
-	atomic64_sub(delta, &event->hw.period_left);
+	local64_add(delta, &event->count);
+	local64_sub(delta, &event->hw.period_left);
 }
 
 /*
@@ -296,11 +296,11 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 
 	val = 0;
 	if (event->hw.sample_period) {
-		s64 left = atomic64_read(&event->hw.period_left);
+		s64 left = local64_read(&event->hw.period_left);
 		if (left < 0x80000000L)
 			val = 0x80000000L - left;
 	}
-	atomic64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.prev_count, val);
 	write_pmc(i, val);
 	perf_event_update_userpage(event);
 
@@ -371,8 +371,8 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
 	write_pmc(event->hw.idx, val);
-	atomic64_set(&event->hw.prev_count, val);
-	atomic64_set(&event->hw.period_left, left);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
 	perf_enable();
 	local_irq_restore(flags);
@@ -500,7 +500,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 		return ERR_PTR(-ENOTSUPP);
 
 	event->hw.last_period = event->hw.sample_period;
-	atomic64_set(&event->hw.period_left, event->hw.last_period);
+	local64_set(&event->hw.period_left, event->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -541,16 +541,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	int record = 0;
 
 	/* we don't have to worry about interrupts here */
-	prev = atomic64_read(&event->hw.prev_count);
+	prev = local64_read(&event->hw.prev_count);
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &event->count);
+	local64_add(delta, &event->count);
 
 	/*
 	 * See if the total period for this event has expired,
 	 * and update for the next period.
 	 */
 	val = 0;
-	left = atomic64_read(&event->hw.period_left) - delta;
+	left = local64_read(&event->hw.period_left) - delta;
 	if (period) {
 		if (left <= 0) {
 			left += period;
@@ -584,8 +584,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	}
 
 	write_pmc(event->hw.idx, val);
-	atomic64_set(&event->hw.prev_count, val);
-	atomic64_set(&event->hw.period_left, left);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
 }
 