@@ -6,7 +6,8 @@
  *
  * This code is based on the implementation for ARM, which is in turn
  * based on the sparc64 perf event code and the x86 code. Performance
- * counter access is based on the MIPS Oprofile code.
+ * counter access is based on the MIPS Oprofile code. The callchain
+ * support is based on the MIPS stacktrace.c code.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -486,3 +487,132 @@ handle_associated_event(struct cpu_hw_events *cpuc,
 	if (perf_event_overflow(event, 0, data, regs))
 		mipspmu->disable_event(idx);
 }
+
+/* Callchain handling code. */
+static inline void
+callchain_store(struct perf_callchain_entry *entry,
+		u64 ip)
+{
+	if (entry->nr < PERF_MAX_STACK_DEPTH)
+		entry->ip[entry->nr++] = ip;
+}
+
+/*
+ * Leave the userspace callchain empty for now. When we find a way to
+ * trace the user stack callchains, we will add that code here.
+ */
+static void
+perf_callchain_user(struct pt_regs *regs,
+		    struct perf_callchain_entry *entry)
+{
+}
+
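+/*
+ * Fallback unwinder: scan the kernel stack word by word and record
+ * every value that looks like a kernel text address. This may pick
+ * up stale return addresses left on the stack, but it requires no
+ * frame information.
+ */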
+static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
+				    unsigned long reg29)
+{
+	unsigned long *sp = (unsigned long *)reg29;
+	unsigned long addr;
+
+	while (!kstack_end(sp)) {
+		addr = *sp++;
+		if (__kernel_text_address(addr)) {
+			callchain_store(entry, addr);
+			if (entry->nr >= PERF_MAX_STACK_DEPTH)
+				break;
+		}
+	}
+}
+
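+/*
+ * With CONFIG_KALLSYMS the frames are walked properly through
+ * unwind_stack(); otherwise (or when a raw trace was requested via
+ * raw_show_trace, or the PC is not in kernel text) we fall back to
+ * the raw stack scan above. Either way the chain is tagged
+ * PERF_CONTEXT_KERNEL first so the frames can be attributed to the
+ * right context.
+ */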
+static void
+perf_callchain_kernel(struct pt_regs *regs,
+		      struct perf_callchain_entry *entry)
+{
+	unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+	unsigned long ra = regs->regs[31];
+	unsigned long pc = regs->cp0_epc;
+
+	callchain_store(entry, PERF_CONTEXT_KERNEL);
+	if (raw_show_trace || !__kernel_text_address(pc)) {
+		unsigned long stack_page =
+			(unsigned long)task_stack_page(current);
+		if (stack_page && sp >= stack_page &&
+		    sp <= stack_page + THREAD_SIZE - 32)
+			save_raw_perf_callchain(entry, sp);
+		return;
+	}
+	do {
+		callchain_store(entry, pc);
+		if (entry->nr >= PERF_MAX_STACK_DEPTH)
+			break;
+		pc = unwind_stack(current, &sp, pc, &ra);
+	} while (pc);
+#else
+	callchain_store(entry, PERF_CONTEXT_KERNEL);
+	save_raw_perf_callchain(entry, sp);
+#endif
+}
+
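+/*
+ * Record the kernel and/or user callchain for this sample. For a
+ * sample taken in kernel mode, the interrupted task's user context
+ * is recovered via task_pt_regs() so that its user frames can be
+ * recorded as well.
+ */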
+static void
+perf_do_callchain(struct pt_regs *regs,
+		  struct perf_callchain_entry *entry)
+{
+	int is_user;
+
+	if (!regs)
+		return;
+
+	is_user = user_mode(regs);
+
+	if (!current || !current->pid)
+		return;
+
+	if (is_user && current->state != TASK_RUNNING)
+		return;
+
+	if (!is_user) {
+		perf_callchain_kernel(regs, entry);
+		if (current->mm)
+			regs = task_pt_regs(current);
+		else
+			regs = NULL;
+	}
+	if (regs)
+		perf_callchain_user(regs, entry);
+}
+
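+/*
+ * perf_callchain() runs in the counter overflow (interrupt) path,
+ * hence one pmc_irq_entry buffer per CPU: the handler cannot race
+ * with itself on the same CPU, so no locking is needed on the
+ * entry.
+ */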
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+
+struct perf_callchain_entry *
+perf_callchain(struct pt_regs *regs)
+{
+	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
+
+	entry->nr = 0;
+	perf_do_callchain(regs, entry);
+	return entry;
+}