@@ -534,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 #include "perf_event_mipsxx.c"
 
 /* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-		u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
 
 /*
  * Leave userspace callchain empty for now. When we find a way to trace
  * the user stack callchains, we add here.
  */
-static void
-perf_callchain_user(struct pt_regs *regs,
-		    struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+		    struct pt_regs *regs)
 {
 }
 
@@ -561,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
 	while (!kstack_end(sp)) {
 		addr = *sp++;
 		if (__kernel_text_address(addr)) {
-			callchain_store(entry, addr);
+			perf_callchain_store(entry, addr);
 			if (entry->nr >= PERF_MAX_STACK_DEPTH)
 				break;
 		}
 	}
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-		      struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+		      struct pt_regs *regs)
 {
 	unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
 	unsigned long ra = regs->regs[31];
 	unsigned long pc = regs->cp0_epc;
 
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	if (raw_show_trace || !__kernel_text_address(pc)) {
 		unsigned long stack_page =
 			(unsigned long)task_stack_page(current);
@@ -587,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
 		return;
 	}
 	do {
-		callchain_store(entry, pc);
+		perf_callchain_store(entry, pc);
 		if (entry->nr >= PERF_MAX_STACK_DEPTH)
 			break;
 		pc = unwind_stack(current, &sp, pc, &ra);
 	} while (pc);
 #else
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
 	save_raw_perf_callchain(entry, sp);
 #endif
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-		  struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (!current || !current->pid)
-		return;
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	if (!is_user) {
-		perf_callchain_kernel(regs, entry);
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-	if (regs)
-		perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-	entry->nr = 0;
-	perf_do_callchain(regs, entry);
-	return entry;
-}
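
Note: the dropped MIPS-local callchain_store() is superseded by the generic perf_callchain_store() helper, and perf_do_callchain(), the per-CPU pmc_irq_entry buffer and perf_callchain() go away because the core perf code now owns callchain buffer allocation and calls the perf_callchain_kernel()/perf_callchain_user() hooks itself; it also stores the PERF_CONTEXT_KERNEL marker, which is why those stores disappear above. As a rough sketch, the generic helper in <linux/perf_event.h> around this kernel version looks like the following (check the tree you are building against, the exact definition may differ):

	/* Sketch of the generic helper; it mirrors the removed MIPS-local
	 * callchain_store(), including the depth bound check. */
	static inline void perf_callchain_store(struct perf_callchain_entry *entry,
						u64 ip)
	{
		if (entry->nr < PERF_MAX_STACK_DEPTH)
			entry->ip[entry->nr++] = ip;
	}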