@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
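
The single masked subtraction above relies on max_period being an all-ones mask covering the counter width (0xffffffff for the 32-bit ARM PMU counters), so that modular arithmetic absorbs any wrap between the two reads. A standalone user-space sketch of the idea, not part of the patch and assuming that 32-bit mask:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t max_period = 0xffffffff;	/* 32-bit counter mask */
		uint64_t prev_raw_count, new_raw_count, delta;

		/* No wrap: the counter advanced by 100 events. */
		prev_raw_count = 0x00001000;
		new_raw_count  = 0x00001064;
		delta = (new_raw_count - prev_raw_count) & max_period;
		printf("no wrap:   delta = %llu\n", (unsigned long long)delta);

		/* Wrap: 100 more events carried the counter past 0xffffffff. */
		prev_raw_count = 0xfffffff0;
		new_raw_count  = 0x00000054;
		delta = (new_raw_count - prev_raw_count) & max_period;
		printf("with wrap: delta = %llu\n", (unsigned long long)delta);

		return 0;
	}

Both cases print a delta of 100. Dropping the open-coded overflow handling is only safe because the counter can never advance by more than max_period between reads, which the sample_period change further down guarantees for non-sampling events.
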
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period = armpmu->max_period >> 1;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
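
To see how much slack the halved default buys, note that armpmu_event_set_period() programs the counter with the negated period (truncated to the counter width), so the counter starts that many counts below the overflow point. A rough sketch of the arithmetic, not part of the patch and assuming a 32-bit counter, hence max_period == 0xffffffff:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint64_t max_period = 0xffffffff;	/* 32-bit counter */
		uint64_t full = max_period;		/* old non-sampling period */
		uint64_t half = max_period >> 1;	/* new non-sampling period */

		/* Initial counter value is -(period), truncated to 32 bits. */
		printf("full period starts at 0x%08llx\n",
		       (unsigned long long)(-full & max_period));
		printf("half period starts at 0x%08llx\n",
		       (unsigned long long)(-half & max_period));
		return 0;
	}

With the full period the counter starts at 0x00000001, so once it overflows it only needs a couple of further events before the value read in armpmu_event_update() overtakes prev_raw_count and the delta collapses to a small number. With the halved period it starts at 0x80000001, leaving roughly 2^31 events of headroom between the overflow interrupt firing and the arithmetic going wrong, hence the "serious IRQ latency issues" wording in the comment.
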
@@ -679,6 +679,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 	armpmu->type = ARM_PMU_DEVICE_CPU;
 }
 
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+				    unsigned long action, void *hcpu)
+{
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (cpu_pmu && cpu_pmu->reset)
+		cpu_pmu->reset(NULL);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+	.notifier_call = pmu_cpu_notify,
+};
+
 /*
  * CPU PMU identification and registration.
  */
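
The notifier only acts on CPU_STARTING, which runs on the CPU that is coming online with interrupts disabled, early enough to reset the local PMU before events are scheduled onto it. Masking off CPU_TASKS_FROZEN lets the same comparison also match CPU_STARTING_FROZEN, the variant delivered when CPUs are brought back during resume from suspend, because the _FROZEN notifier actions are defined as the base action OR'd with CPU_TASKS_FROZEN. A stand-alone sketch of that check, using stand-in values rather than the real <linux/cpu.h> constants:

	#include <stdio.h>

	/* Stand-in values for illustration only; kernel code uses <linux/cpu.h>. */
	#define CPU_STARTING		0x000a
	#define CPU_TASKS_FROZEN	0x0010
	#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)

	static int wants_reset(unsigned long action)
	{
		return (action & ~CPU_TASKS_FROZEN) == CPU_STARTING;
	}

	int main(void)
	{
		printf("CPU_STARTING        -> %d\n", wants_reset(CPU_STARTING));
		printf("CPU_STARTING_FROZEN -> %d\n", wants_reset(CPU_STARTING_FROZEN));
		printf("CPU_TASKS_FROZEN    -> %d\n", wants_reset(CPU_TASKS_FROZEN));
		return 0;
	}

Only the two starting variants report 1; every other action is ignored and the notifier returns NOTIFY_DONE.
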
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
 		pr_info("enabled with %s PMU driver, %d counters available\n",
 			cpu_pmu->name, cpu_pmu->num_events);
 		cpu_pmu_init(cpu_pmu);
+		register_cpu_notifier(&pmu_cpu_notifier);
 		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
 	} else {
 		pr_info("no hardware support available\n");