@@ -22,9 +22,9 @@
 #include <linux/slab.h>
 
 #include <asm/core_reg.h>
-#include <asm/hwthread.h>
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/processor.h>
 
 #include "perf_event.h"
 
@@ -40,10 +40,10 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 /* PMU admin */
 const char *perf_pmu_name(void)
 {
-	if (metag_pmu)
-		return metag_pmu->pmu.name;
+	if (!metag_pmu)
+		return NULL;
 
-	return NULL;
+	return metag_pmu->name;
 }
 EXPORT_SYMBOL_GPL(perf_pmu_name);
 
@@ -171,6 +171,7 @@ static int metag_pmu_event_init(struct perf_event *event)
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
 	case PERF_TYPE_HW_CACHE:
+	case PERF_TYPE_RAW:
 		err = _hw_perf_event_init(event);
 		break;
 
@@ -211,9 +212,10 @@ again:
 	/*
 	 * Calculate the delta and add it to the counter.
 	 */
-	delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & MAX_PERIOD;
 
 	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
 }
 
 int metag_pmu_event_set_period(struct perf_event *event,
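The MAX_PERIOD mask above is what keeps the delta correct when the 24-bit counter wraps between the two reads. A standalone sketch of the same arithmetic, assuming MAX_PERIOD is the 24-bit mask this driver uses elsewhere:

#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD 0x00ffffffUL		/* assumed 24-bit counter mask */

int main(void)
{
	/* The counter wrapped: previous read near the top, new read near zero. */
	uint64_t prev = 0x00fffff0, new = 0x00000010;

	/* Unsigned subtraction plus masking recovers the true distance. */
	uint64_t delta = (new - prev) & MAX_PERIOD;

	printf("delta = %llu\n", (unsigned long long)delta);	/* prints 32 */
	return 0;
}

Without the mask, the wrapped case would produce a huge unsigned delta and inflate event->count.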
@@ -223,6 +225,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
 	s64 period = hwc->sample_period;
 	int ret = 0;
 
+	/* The period may have been changed */
+	if (unlikely(period != hwc->last_period))
+		left += period - hwc->last_period;
+
 	if (unlikely(left <= -period)) {
 		left = period;
 		local64_set(&hwc->period_left, left);
@@ -240,8 +246,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
 	if (left > (s64)metag_pmu->max_period)
 		left = metag_pmu->max_period;
 
-	if (metag_pmu->write)
-		metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD);
+	if (metag_pmu->write) {
+		local64_set(&hwc->prev_count, -(s32)left);
+		metag_pmu->write(idx, -left & MAX_PERIOD);
+	}
 
 	perf_event_update_userpage(event);
 
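Recording -left in prev_count at the same moment the hardware is programmed keeps the next metag_pmu_event_update() delta equal to the number of events that actually occurred: the counter starts at -left (masked to the counter width) and counts up towards the overflow point. A rough model of that relationship, with an assumed 24-bit counter:

#include <stdint.h>
#include <stdio.h>

#define MAX_PERIOD 0x00ffffffUL		/* assumed 24-bit counter mask */

int main(void)
{
	int64_t left = 1000;				/* events until the next sample */
	uint32_t start = (uint32_t)(-left) & MAX_PERIOD;	/* value written to the counter */
	int64_t prev_count = -(int32_t)left;		/* mirrored into hwc->prev_count */

	/* After 'left' events the counter reaches the overflow point (wraps to 0). */
	uint32_t now = (start + 1000) & MAX_PERIOD;
	uint64_t delta = ((uint64_t)now - (uint64_t)prev_count) & MAX_PERIOD;

	printf("start=0x%06x delta=%llu\n", start, (unsigned long long)delta);	/* delta == 1000 */
	return 0;
}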
@@ -549,6 +557,10 @@ static int _hw_perf_event_init(struct perf_event *event)
 		if (err)
 			return err;
 		break;
+
+	case PERF_TYPE_RAW:
+		mapping = attr->config;
+		break;
 	}
 
 	/* Return early if the event is unsupported */
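With PERF_TYPE_RAW accepted, whatever the user puts in attr->config is handed to the hardware unmodified. A minimal user-space sketch of requesting such an event through perf_event_open(); the 0x03 code is a placeholder, not a documented Meta event number:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x03;		/* placeholder raw event code */
	attr.disabled = 1;

	/* Monitor the calling task on any CPU, no group leader, no flags. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);
	return 0;
}

The perf tool reaches the same path with its generic raw-event syntax, e.g. perf stat -e r03.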
@@ -610,15 +622,13 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
 		WARN_ONCE((config != 0x100),
 			"invalid configuration (%d) for counter (%d)\n",
 			config, idx);
-
-		/* Reset the cycle count */
-		__core_reg_set(TXTACTCYC, 0);
+		local64_set(&event->prev_count, __core_reg_get(TXTACTCYC));
 		goto unlock;
 	}
 
 	/* Check for a core internal or performance channel event. */
 	if (tmp) {
-		void *perf_addr = (void *)PERF_COUNT(idx);
+		void *perf_addr;
 
 		/*
 		 * Anything other than a cycle count will write the low-
@@ -632,9 +642,14 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
 		case 0xf0:
 			perf_addr = (void *)PERF_CHAN(idx);
 			break;
+
+		default:
+			perf_addr = NULL;
+			break;
 		}
 
-		metag_out32((tmp & 0x0f), perf_addr);
+		if (perf_addr)
+			metag_out32((config & 0x0f), perf_addr);
 
 		/*
 		 * Now we use the high nibble as the performance event to
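Splitting the event code into nibbles is what this branch is about: the high nibble (0xd0 core internal, 0xf0 performance channel) selects which mux register to touch, and the low nibble is the sub-event written into it. Since tmp only carries that class nibble (it is later shifted down with tmp >> 4), the old metag_out32((tmp & 0x0f), ...) always wrote zero. A tiny illustration, using a made-up event code:

#include <stdio.h>

int main(void)
{
	unsigned int config = 0xd3;		/* placeholder core-internal event */
	unsigned int tmp = config & 0xf0;	/* class nibble tested by the switch */

	printf("class=0x%02x sub-event=0x%x counted event=0x%x\n",
	       tmp, config & 0x0f, tmp >> 4);	/* class=0xd0 sub-event=0x3 counted event=0xd */
	return 0;
}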
@@ -643,13 +658,21 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
 		config = tmp >> 4;
 	}
 
-	/*
-	 * Enabled counters start from 0. Early cores clear the count on
-	 * write but newer cores don't, so we make sure that the count is
-	 * set to 0.
-	 */
 	tmp = ((config & 0xf) << 28) |
-			((1 << 24) << cpu_2_hwthread_id[get_cpu()]);
+			((1 << 24) << hard_processor_id());
+	if (metag_pmu->max_period)
+		/*
+		 * Cores supporting overflow interrupts may have had the counter
+		 * set to a specific value that needs preserving.
+		 */
+		tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
+	else
+		/*
+		 * Older cores reset the counter on write, so prev_count needs
+		 * resetting too so we can calculate a correct delta.
+		 */
+		local64_set(&event->prev_count, 0);
+
 	metag_out32(tmp, PERF_COUNT(idx));
 unlock:
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
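The value written to PERF_COUNT(idx) above packs three fields into one 32-bit register: the event select in the top nibble, a per-thread enable bit in bits 27:24, and the 24-bit count itself, which is preserved on cores that support overflow interrupts. A small sketch of that packing, mirroring the expressions in the hunk:

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed layout of the PERF_COUNT register, taken from the driver:
 *   [31:28] event select (low nibble of config)
 *   [27:24] enable bit for the owning hardware thread
 *   [23:0]  current count
 */
static uint32_t pack_perf_count(uint32_t config, int thread, uint32_t count)
{
	return ((config & 0xf) << 28) |
	       ((1u << 24) << thread) |
	       (count & 0x00ffffff);
}

int main(void)
{
	/* Thread 1 counting event 0x3, carrying over a count of 0x1234. */
	printf("0x%08x\n", pack_perf_count(0x3, 1, 0x1234));	/* 0x32001234 */
	return 0;
}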
@@ -693,9 +716,8 @@ static u64 metag_pmu_read_counter(int idx)
 {
 	u32 tmp = 0;
 
-	/* The act of reading the cycle counter also clears it */
 	if (METAG_INST_COUNTER == idx) {
-		__core_reg_swap(TXTACTCYC, tmp);
+		tmp = __core_reg_get(TXTACTCYC);
 		goto out;
 	}
 
@@ -764,10 +786,16 @@ static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
 
 	/*
 	 * Enable the counter again once core overflow processing has
-	 * completed.
+	 * completed. Note the counter value may have been modified while it was
+	 * inactive to set it up ready for the next interrupt.
 	 */
-	if (!perf_event_overflow(event, &sampledata, regs))
+	if (!perf_event_overflow(event, &sampledata, regs)) {
+		__global_lock2(flags);
+		counter = (counter & 0xff000000) |
+			  (metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
 		metag_out32(counter, PERF_COUNT(idx));
+		__global_unlock2(flags);
+	}
 
 	return IRQ_HANDLED;
 }
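The locked sequence above re-enables the counter by restoring the saved control byte while keeping whatever count metag_pmu_event_set_period() programmed while the counter was held off. The merge itself is just a two-mask OR; a standalone sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t saved = 0x32000005;	/* PERF_COUNT latched when the IRQ fired */
	uint32_t now_val = 0x00fffc18;	/* value reprogrammed while disabled */

	/* Control bits (event select + thread enable) come from the saved copy,
	 * count bits from the current register contents. */
	uint32_t reenable = (saved & 0xff000000) | (now_val & 0x00ffffff);

	printf("0x%08x\n", reenable);	/* 0x32fffc18 */
	return 0;
}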
@@ -830,7 +858,7 @@ static int __init init_hw_perf_events(void)
 			metag_pmu->max_period = 0;
 		}
 
-		metag_pmu->name = "Meta 2";
+		metag_pmu->name = "meta2";
 		metag_pmu->version = version;
 		metag_pmu->pmu = pmu;
 	}