@@ -0,0 +1,128 @@
+/*
+ * Dynamic function tracing support.
+ *
+ * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
+ *
+ * For licensing details, see COPYING.
+ *
+ * Defines low-level handling of mcount calls when the kernel
+ * is compiled with the -pg flag. When using dynamic ftrace, the
+ * mcount call sites are lazily patched with NOPs until they are
+ * enabled. All code mutation routines here take effect atomically.
+ */
+
+#include <linux/ftrace.h>
+#include <asm/cacheflush.h>
+
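+/*
+ * An ARM instruction is 4 bytes wide (INSN_SIZE), and the PC value
+ * seen by an executing instruction is 8 bytes ahead of that
+ * instruction (PC_OFFSET), so BL offsets are encoded relative to
+ * pc + 8.
+ */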
+#define INSN_SIZE 4
+#define PC_OFFSET 8
+#define BL_OPCODE 0xeb000000
+#define BL_OFFSET_MASK 0x00ffffff
+
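+/*
+ * Code patching is expected to run from stop_machine context (see
+ * ftrace_dyn_arch_init below), so a single static buffer is enough
+ * to hold the branch instruction built by ftrace_call_replace().
+ */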
+static unsigned long bl_insn;
+static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */
+
+/* return true if mcount call site is already patched/no-op'ed */
+int ftrace_ip_converted(unsigned long pc)
+{
+ unsigned long save;
+
+ pc -= INSN_SIZE;
+ save = *(unsigned long *)pc;
+ return save == NOP;
+}
+
+unsigned char *ftrace_nop_replace(void)
+{
+ return (unsigned char *)&NOP;
+}
+
+/* construct a branch (BL) instruction to addr */
+unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr)
+{
+ long offset;
+
+ offset = (long)addr - (long)(pc - INSN_SIZE + PC_OFFSET);
+ if (unlikely(offset < -33554432 || offset > 33554428)) {
+ /* Can't generate branches that far: BL takes a 24-bit signed
+ * word offset, i.e. +/- 32 MB (see the ARM ARM). Ftrace
+ * doesn't generate branches outside of core kernel text.
+ */
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+ offset = (offset >> 2) & BL_OFFSET_MASK;
+ bl_insn = BL_OPCODE | offset;
+ return (unsigned char *)&bl_insn;
+}
+
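+/*
+ * Replace the instruction just before pc with new_code, provided that
+ * location still holds old_code. A faulting access is turned into
+ * err = 1 by the exception-table fixup below; a site holding neither
+ * old_code nor new_code yields err = 2; 0 means the site now contains
+ * new_code, with the icache flushed if the word was rewritten.
+ */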
+int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
+ unsigned char *new_code)
+{
+ unsigned long err = 0, replaced = 0, old, new;
+
+ old = *(unsigned long *)old_code;
+ new = *(unsigned long *)new_code;
+ pc -= INSN_SIZE;
+
+ __asm__ __volatile__ (
+ "1: ldr %1, [%2] \n"
+ " cmp %1, %4 \n"
+ "2: streq %3, [%2] \n"
+ " cmpne %1, %3 \n"
+ " movne %0, #2 \n"
+ "3:\n"
+
+ ".section .fixup, \"ax\"\n"
+ "4: mov %0, #1 \n"
+ " b 3b \n"
+ ".previous\n"
+
+ ".section __ex_table, \"a\"\n"
+ " .long 1b, 4b \n"
+ " .long 2b, 4b \n"
+ ".previous\n"
+
+ : "=r"(err), "=r"(replaced)
+ : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
+ : "memory");
+
+ if (!err && (replaced == old))
+ flush_icache_range(pc, pc + INSN_SIZE);
+
+ return err;
+}
+
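+/*
+ * Redirect the patchable call at the ftrace_call site to func. pc is
+ * advanced by INSN_SIZE because the helpers above expect the address
+ * of the instruction following the call site.
+ */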
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ int ret;
+ unsigned long pc, old;
+ unsigned char *new;
+
+ pc = (unsigned long)&ftrace_call;
+ pc += INSN_SIZE;
+ memcpy(&old, &ftrace_call, INSN_SIZE);
+ new = ftrace_call_replace(pc, (unsigned long)func);
+ ret = ftrace_modify_code(pc, (unsigned char *)&old, new);
+ return ret;
+}
+
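+/*
+ * Patch the call at the mcount_call site to branch to the address
+ * passed in via *data; the result of ftrace_modify_code() is handed
+ * back to the caller through *data as well.
+ */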
+int ftrace_mcount_set(unsigned long *data)
+{
+ unsigned long pc, old;
+ unsigned long *addr = data;
+ unsigned char *new;
+
+ pc = (unsigned long)&mcount_call;
+ pc += INSN_SIZE;
+ memcpy(&old, &mcount_call, INSN_SIZE);
+ new = ftrace_call_replace(pc, *addr);
+ *addr = ftrace_modify_code(pc, (unsigned char *)&old, new);
+ return 0;
+}
+
+/* run from kstop_machine */
+int __init ftrace_dyn_arch_init(void *data)
+{
+ ftrace_mcount_set(data);
+ return 0;
+}