@@ -16,8 +16,8 @@
 #include <linux/init.h>
 #include <linux/list.h>
 
-#include <asm/alternative.h>
 #include <asm/ftrace.h>
+#include <asm/nops.h>
 
 
 /* Long is fine, even if it is only 4 bytes ;-) */
@@ -119,13 +119,67 @@ notrace int ftrace_mcount_set(unsigned long *data)
 
 int __init ftrace_dyn_arch_init(void *data)
 {
-	const unsigned char *const *noptable = find_nop_table();
-
-	/* This is running in kstop_machine */
-
-	ftrace_mcount_set(data);
+	extern const unsigned char ftrace_test_p6nop[];
+	extern const unsigned char ftrace_test_nop5[];
+	extern const unsigned char ftrace_test_jmp[];
+	int faulted = 0;
 
-	ftrace_nop = (unsigned long *)noptable[MCOUNT_INSN_SIZE];
+	/*
+	 * There is no good nop for all x86 archs.
+	 * We will default to using the P6_NOP5, but first we
+	 * will test to make sure that the nop will actually
+	 * work on this CPU. If it faults, we will then
+	 * fall back to a less efficient 5 byte nop. If that fails
+	 * we then just use a jmp as our nop. This isn't the most
+	 * efficient nop, but we cannot use a multi-part nop,
+	 * since we would then risk being preempted in the middle
+	 * of that nop, and if tracing is enabled at that point,
+	 * it might cause a system crash.
+	 *
+	 * TODO: check the cpuid to determine the best nop.
+	 */
+	asm volatile (
+		"jmp ftrace_test_jmp\n"
+		/* This code needs to stay around */
+		".section .text, \"ax\"\n"
+		"ftrace_test_jmp:"
+		"jmp ftrace_test_p6nop\n"
+		".byte 0x00,0x00,0x00\n"	/* 2 byte jmp + 3 bytes */
+		"ftrace_test_p6nop:"
+		P6_NOP5
+		"jmp 1f\n"
+		"ftrace_test_nop5:"
+		".byte 0x66,0x66,0x66,0x66,0x90\n"
+		"jmp 1f\n"
+		".previous\n"
+		"1:"
+		".section .fixup, \"ax\"\n"
+		"2:	movl $1, %0\n"
+		"	jmp ftrace_test_nop5\n"
+		"3:	movl $2, %0\n"
+		"	jmp 1b\n"
+		".previous\n"
+		_ASM_EXTABLE(ftrace_test_p6nop, 2b)
+		_ASM_EXTABLE(ftrace_test_nop5, 3b)
+		: "=r"(faulted) : "0" (faulted));
+
+	switch (faulted) {
+	case 0:
+		pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
+		ftrace_nop = (unsigned long *)ftrace_test_p6nop;
+		break;
+	case 1:
+		pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
+		ftrace_nop = (unsigned long *)ftrace_test_nop5;
+		break;
+	case 2:
+		pr_info("ftrace: converting mcount calls to jmp 1f\n");
+		ftrace_nop = (unsigned long *)ftrace_test_jmp;
+		break;
+	}
+
+	/* The return code is returned via data */
+	*(unsigned long *)data = 0;
 
 	return 0;
 }
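
The probe above leans on the kernel's exception-table machinery: if P6_NOP5 raises an invalid-opcode fault, the fault at ftrace_test_p6nop is routed to fixup label 2, which records faulted = 1 and retries at ftrace_test_nop5; a second fault lands at label 3 and leaves only the jmp variant. The same idea can be sketched in user space, with a SIGILL handler and sigsetjmp()/siglongjmp() standing in for .fixup/_ASM_EXTABLE. This is an illustrative stand-alone program, not kernel code; only the two 5-byte sequences are taken from the patch:

/*
 * User-space analogue of the boot-time nop probe (illustration only).
 * The SIGILL handler plays the role of the .fixup/_ASM_EXTABLE pair:
 * executing an unsupported opcode longjmps back with "faulted" set.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static sigjmp_buf env;

static void sigill(int sig)
{
	(void)sig;
	siglongjmp(env, 1);		/* back into faults(), reporting a fault */
}

/* Run the candidate nop from an executable page; returns 1 if it faulted */
static int faults(const unsigned char *insn, size_t len)
{
	int faulted = 1;
	unsigned char *page = mmap(NULL, 4096,
				   PROT_READ | PROT_WRITE | PROT_EXEC,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (page == MAP_FAILED)
		return 1;

	memcpy(page, insn, len);
	page[len] = 0xc3;		/* ret, so the probe comes back to us */

	signal(SIGILL, sigill);
	if (sigsetjmp(env, 1) == 0) {
		((void (*)(void))page)();
		faulted = 0;		/* nop executed cleanly */
	}
	signal(SIGILL, SIG_DFL);
	munmap(page, 4096);
	return faulted;
}

int main(void)
{
	/* The same 5-byte candidates the patch tries, best first */
	static const unsigned char p6_nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
	static const unsigned char nop5[]    = { 0x66, 0x66, 0x66, 0x66, 0x90 };

	if (!faults(p6_nop5, sizeof(p6_nop5)))
		puts("using P6 nop: 0f 1f 44 00 00");
	else if (!faults(nop5, sizeof(nop5)))
		puts("using prefixed nop: 66 66 66 66 90");
	else
		puts("falling back to the 5-byte jmp nop");
	return 0;
}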
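
Note the design constraint the comment calls out: whichever candidate wins must be a single MCOUNT_INSN_SIZE (5-byte) instruction. With a multi-part nop, e.g. a 3-byte nop followed by a 2-byte nop, a task could be preempted between the two halves; if ftrace then rewrote the site back into a call, the task would resume in the middle of the new call instruction and execute garbage. The jmp fallback preserves that property: ftrace_test_jmp is a 2-byte short jump over its own 3 padding bytes, so the patched site always starts with one whole instruction. Elsewhere in this file (not shown in this hunk), ftrace_nop supplies the bytes that get copied over each mcount call site whenever tracing is disabled.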