@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
 	return ret;
 }
 
-static int mipspmu_enable(struct perf_event *event)
-{
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
-	int idx;
-	int err = 0;
-
-	/* To look for a free counter for this event. */
-	idx = mipspmu->alloc_counter(cpuc, hwc);
-	if (idx < 0) {
-		err = idx;
-		goto out;
-	}
-
-	/*
-	 * If there is an event in the counter we are going to use then
-	 * make sure it is disabled.
-	 */
-	event->hw.idx = idx;
-	mipspmu->disable_event(idx);
-	cpuc->events[idx] = event;
-
-	/* Set the period for the event. */
-	mipspmu_event_set_period(event, hwc, idx);
-
-	/* Enable the event. */
-	mipspmu->enable_event(hwc, idx);
-
-	/* Propagate our changes to the userspace mapping. */
-	perf_event_update_userpage(event);
-
-out:
-	return err;
-}
-
 static void mipspmu_event_update(struct perf_event *event,
 				 struct hw_perf_event *hwc,
 				 int idx)
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
 	unsigned long flags;
 	int shift = 64 - TOTAL_BITS;
 	s64 prev_raw_count, new_raw_count;
-	s64 delta;
+	u64 delta;
 
 again:
 	prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,32 +196,90 @@ again:
 	return;
 }
 
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+	hwc->state = 0;
+
+	/* Set the period for the event. */
+	mipspmu_event_set_period(event, hwc, hwc->idx);
+
+	/* Enable the event. */
+	mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (!mipspmu)
+		return;
+
+	if (!(hwc->state & PERF_HES_STOPPED)) {
+		/* We are working on a local event. */
+		mipspmu->disable_event(hwc->idx);
+		barrier();
+		mipspmu_event_update(event, hwc, hwc->idx);
+		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	}
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
+	int idx;
+	int err = 0;
 
+	perf_pmu_disable(event->pmu);
 
-	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+	/* To look for a free counter for this event. */
+	idx = mipspmu->alloc_counter(cpuc, hwc);
+	if (idx < 0) {
+		err = idx;
+		goto out;
+	}
 
-	/* We are working on a local event. */
+	/*
+	 * If there is an event in the counter we are going to use then
+	 * make sure it is disabled.
+	 */
+	event->hw.idx = idx;
 	mipspmu->disable_event(idx);
+	cpuc->events[idx] = event;
 
-	barrier();
-
-	mipspmu_event_update(event, hwc, idx);
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+	if (flags & PERF_EF_START)
+		mipspmu_start(event, PERF_EF_RELOAD);
 
+	/* Propagate our changes to the userspace mapping. */
 	perf_event_update_userpage(event);
+
+out:
+	perf_pmu_enable(event->pmu);
+	return err;
 }
 
-static void mipspmu_unthrottle(struct perf_event *event)
+static void mipspmu_del(struct perf_event *event, int flags)
 {
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
 
-	mipspmu->enable_event(hwc, hwc->idx);
+	WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
+
+	mipspmu_stop(event, PERF_EF_UPDATE);
+	cpuc->events[idx] = NULL;
+	clear_bit(idx, cpuc->used_mask);
+
+	perf_event_update_userpage(event);
 }
 
 static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
 	mipspmu_event_update(event, hwc, hwc->idx);
 }
 
-static struct pmu pmu = {
-	.enable		= mipspmu_enable,
-	.disable	= mipspmu_disable,
-	.unthrottle	= mipspmu_unthrottle,
-	.read		= mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->start();
+}
+
+static void mipspmu_disable(struct pmu *pmu)
+{
+	if (mipspmu)
+		mipspmu->stop();
+}
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
 	perf_irq = save_perf_irq;
 }
 
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+	if (atomic_dec_and_mutex_lock(&active_events,
+				&pmu_reserve_mutex)) {
+		/*
+		 * We must not call the destroy function with interrupts
+		 * disabled.
+		 */
+		on_each_cpu(reset_counters,
+			(void *)(long)mipspmu->num_counters, 1);
+		mipspmu_free_irq();
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+	int err = 0;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
+	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+		(event->cpu >= 0 && !cpu_online(event->cpu)))
+		return -ENODEV;
+
+	if (!atomic_inc_not_zero(&active_events)) {
+		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+			atomic_dec(&active_events);
+			return -ENOSPC;
+		}
+
+		mutex_lock(&pmu_reserve_mutex);
+		if (atomic_read(&active_events) == 0)
+			err = mipspmu_get_irq();
+
+		if (!err)
+			atomic_inc(&active_events);
+		mutex_unlock(&pmu_reserve_mutex);
+	}
+
+	if (err)
+		return err;
+
+	err = __hw_perf_event_init(event);
+	if (err)
+		hw_perf_event_destroy(event);
+
+	return err;
+}
+
+static struct pmu pmu = {
+	.pmu_enable	= mipspmu_enable,
+	.pmu_disable	= mipspmu_disable,
+	.event_init	= mipspmu_event_init,
+	.add		= mipspmu_add,
+	.del		= mipspmu_del,
+	.start		= mipspmu_start,
+	.stop		= mipspmu_stop,
+	.read		= mipspmu_read,
+};
+
 static inline unsigned int
 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
 {
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
 {
 	struct hw_perf_event fake_hwc = event->hw;
 
-	if (event->pmu && event->pmu != &pmu)
-		return 0;
+	/* Allow mixed event group. So return 1 to pass validation. */
+	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+		return 1;
 
 	return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
 }
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
 	return 0;
 }
 
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-	if (atomic_dec_and_mutex_lock(&active_events,
-				&pmu_reserve_mutex)) {
-		/*
-		 * We must not call the destroy function with interrupts
-		 * disabled.
-		 */
-		on_each_cpu(reset_counters,
-			(void *)(long)mipspmu->num_counters, 1);
-		mipspmu_free_irq();
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-	int err = 0;
-
-	if (!mipspmu || event->cpu >= nr_cpumask_bits ||
-		(event->cpu >= 0 && !cpu_online(event->cpu)))
-		return ERR_PTR(-ENODEV);
-
-	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-			atomic_dec(&active_events);
-			return ERR_PTR(-ENOSPC);
-		}
-
-		mutex_lock(&pmu_reserve_mutex);
-		if (atomic_read(&active_events) == 0)
-			err = mipspmu_get_irq();
-
-		if (!err)
-			atomic_inc(&active_events);
-		mutex_unlock(&pmu_reserve_mutex);
-	}
-
-	if (err)
-		return ERR_PTR(err);
-
-	err = __hw_perf_event_init(event);
-	if (err)
-		hw_perf_event_destroy(event);
-
-	return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
-	if (mipspmu)
-		mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
-	if (mipspmu)
-		mipspmu->stop();
-}
-
 /* This is needed by specific irq handlers in perf_event_*.c */
 static void
 handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 #include "perf_event_mipsxx.c"
 
 /* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-		u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
 
 /*
  * Leave userspace callchain empty for now. When we find a way to trace
  * the user stack callchains, we add here.
  */
-static void
-perf_callchain_user(struct pt_regs *regs,
-		    struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+		    struct pt_regs *regs)
 {
 }
 
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
 	while (!kstack_end(sp)) {
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
-			callchain_store(entry, addr);
+			perf_callchain_store(entry, addr);
 			if (entry->nr >= PERF_MAX_STACK_DEPTH)
 				break;
 		}
 	}
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-		      struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+		      struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
 	unsigned long ra = regs->regs[31];
 	unsigned long pc = regs->cp0_epc;
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	if (raw_show_trace || !__kernel_text_address(pc)) {
 		unsigned long stack_page =
 			(unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
 		return;
 	}
 	do {
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 		if (entry->nr >= PERF_MAX_STACK_DEPTH)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
 #else
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	save_raw_perf_callchain(entry, sp);
 #endif
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-		  struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!current || !current->pid)
-		return;
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	if (!is_user) {
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs)
-		perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-	perf_do_callchain(regs, entry);
-	return entry;
-}