@@ -224,7 +224,8 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc;
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
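The handler no longer reaches into a per-cpu variable by name: everything is
recovered from the `dev` cookie supplied at interrupt-registration time. A
minimal sketch of the pattern, assuming the registration code (which lives
outside this file, names assumed) passes the arm_pmu itself as the cookie:

	/* registration side (elsewhere; hypothetical helper): the arm_pmu is the cookie */
	static int armpmu_request_irq(struct arm_pmu *armpmu, int irq)
	{
		return request_irq(irq, armpmu->handle_irq,
				   IRQF_DISABLED | IRQF_NOBALANCING,
				   "arm-pmu", armpmu);
	}

	/* handler side: recover the pmu and its hardware-event state from dev */
	static irqreturn_t some_handle_irq(int irq_num, void *dev)
	{
		struct arm_pmu *cpu_pmu = dev;
		struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
		/* ... walk cpuc->events[] exactly as the hunks below do ... */
		return IRQ_HANDLED;
	}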
@@ -248,7 +249,6 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -260,13 +260,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	irq_work_run();
@@ -280,11 +280,13 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
 	return IRQ_HANDLED;
 }
 
-static void
-xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void xscale1pmu_enable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
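Since the callbacks now receive only the perf_event, each one derives the
owning arm_pmu and the counter index from the event itself. to_arm_pmu() is
the usual container_of() accessor; a sketch of how it is plausibly defined
(the real definition lives in the ARM pmu header, embedded-field name assumed):

	#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

With that, event->pmu (set by the core when the event was created) leads back
to the arm_pmu, and event->hw.idx replaces the old explicit idx argument.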
@@ -314,11 +316,13 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void xscale1pmu_disable_event(struct perf_event *event)
 {
 	unsigned long val, mask, evt, flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -348,9 +352,10 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 
 static int
 xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
-				struct hw_perf_event *event)
+				struct perf_event *event)
 {
-	if (XSCALE_PERFCTR_CCNT == event->config_base) {
+	struct hw_perf_event *hwc = &event->hw;
+	if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
 		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
@@ -366,8 +371,7 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 }
 
-static void
-xscale1pmu_start(void)
+static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
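start()/stop() likewise stop depending on a file-scope cpu_pmu pointer and
take the arm_pmu as an explicit argument, so the core can drive whichever
instance it holds. Roughly how a caller would look (a sketch only; the actual
dispatch sits in the common ARM perf code, names assumed):

	static void armpmu_enable(struct pmu *pmu)
	{
		/* resolve the embedding arm_pmu, then start that instance */
		struct arm_pmu *armpmu = to_arm_pmu(pmu);
		armpmu->start(armpmu);
	}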
@@ -379,8 +383,7 @@ xscale1pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale1pmu_stop(void)
+static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -392,9 +395,10 @@ xscale1pmu_stop(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32
-xscale1pmu_read_counter(int counter)
+static inline u32 xscale1pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 	u32 val = 0;
 
 	switch (counter) {
@@ -412,9 +416,11 @@ xscale1pmu_read_counter(int counter)
 	return val;
 }
 
-static inline void
-xscale1pmu_write_counter(int counter, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
+
 	switch (counter) {
 	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
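The counter accessors follow the same convention, which is what lets the
generic update path be called with nothing but the event. A sketch of the
shape of armpmu_event_update() under these assumptions (its real body lives
in the common ARM perf code, not in this file):

	u64 armpmu_event_update(struct perf_event *event)
	{
		struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
		struct hw_perf_event *hwc = &event->hw;
		u64 delta, prev_raw_count, new_raw_count;

	again:
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = armpmu->read_counter(event);

		/* retry if an interrupt updated prev_count under us */
		if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				    new_raw_count) != prev_raw_count)
			goto again;

		/* accumulate the wrapped-around delta into the event count */
		delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

		local64_add(delta, &event->count);
		local64_sub(delta, &hwc->period_left);

		return new_raw_count;
	}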
@@ -565,7 +571,8 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 {
 	unsigned long pmnc, of_flags;
 	struct perf_sample_data data;
-	struct pmu_hw_events *cpuc;
+	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
 	struct pt_regs *regs;
 	int idx;
 
@@ -583,7 +590,6 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 
 	regs = get_irq_regs();
 
-	cpuc = &__get_cpu_var(cpu_hw_events);
 	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
@@ -595,13 +601,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		hwc = &event->hw;
-		armpmu_event_update(event, hwc, idx);
+		armpmu_event_update(event);
 		perf_sample_data_init(&data, 0, hwc->last_period);
-		if (!armpmu_event_set_period(event, hwc, idx))
+		if (!armpmu_event_set_period(event))
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			cpu_pmu->disable(hwc, idx);
+			cpu_pmu->disable(event);
 	}
 
 	irq_work_run();
@@ -615,11 +621,13 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 	return IRQ_HANDLED;
 }
 
-static void
-xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void xscale2pmu_enable_event(struct perf_event *event)
 {
 	unsigned long flags, ien, evtsel;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -659,11 +667,13 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+static void xscale2pmu_disable_event(struct perf_event *event)
 {
 	unsigned long flags, ien, evtsel, of_flags;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+	int idx = hwc->idx;
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -711,7 +721,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 
 static int
 xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
-				struct hw_perf_event *event)
+				struct perf_event *event)
 {
 	int idx = xscale1pmu_get_event_idx(cpuc, event);
 	if (idx >= 0)
@@ -725,8 +735,7 @@ out:
 	return idx;
 }
 
-static void
-xscale2pmu_start(void)
+static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -738,8 +747,7 @@ xscale2pmu_start(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-xscale2pmu_stop(void)
+static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
 {
 	unsigned long flags, val;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
@@ -751,9 +759,10 @@ xscale2pmu_stop(void)
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static inline u32
-xscale2pmu_read_counter(int counter)
+static inline u32 xscale2pmu_read_counter(struct perf_event *event)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
 	u32 val = 0;
 
 	switch (counter) {
@@ -777,9 +786,11 @@ xscale2pmu_read_counter(int counter)
 	return val;
 }
 
-static inline void
-xscale2pmu_write_counter(int counter, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
 {
+	struct hw_perf_event *hwc = &event->hw;
+	int counter = hwc->idx;
+
 	switch (counter) {
 	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));