@@ -281,6 +281,7 @@ static void
 xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -302,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long val, mask, evt, flags;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
@@ -333,12 +335,12 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -365,24 +367,26 @@ static void
 xscale1pmu_start(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u32
@@ -610,6 +614,7 @@ static void
 xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -643,16 +648,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags, ien, evtsel;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
 	evtsel = xscale2pmu_read_event_select();
@@ -686,10 +692,10 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static int
@@ -712,24 +718,26 @@ static void
 xscale2pmu_start(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void
 xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
+	struct cpu_hw_events *events = armpmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static inline u32
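
Every hunk above makes the same two-line change: the function obtains the per-CPU cpu_hw_events via armpmu->get_hw_events() and takes that structure's pmu_lock in place of the old file-global pmu_lock, so PMU register read-modify-write sequences are serialised per CPU rather than across all CPUs. For review purposes, the resulting shape of each accessor is sketched below (illustrative only, not part of the patch; armpmu, struct cpu_hw_events and the pmnc accessors are defined elsewhere in the kernel's ARM perf code):

	static void xscale1pmu_start(void)
	{
		unsigned long flags, val;
		/* Per-CPU state for this PMU, replacing the global lock. */
		struct cpu_hw_events *events = armpmu->get_hw_events();

		/* Serialise the PMNC read-modify-write against this CPU's
		 * PMU interrupt handler via the per-CPU pmu_lock. */
		raw_spin_lock_irqsave(&events->pmu_lock, flags);
		val = xscale1pmu_read_pmnc();
		val |= XSCALE_PMU_ENABLE;	/* set the global enable bit */
		xscale1pmu_write_pmnc(val);
		raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
	}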