ftrace.c
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/ftrace.h>
#include <linux/ftrace.h>
#include <asm/nops.h>
#include <asm/nmi.h>

#ifdef CONFIG_FUNCTION_RET_TRACER

/*
 * These functions are picked from those used on
 * this page for dynamic ftrace. They have been
 * simplified to ignore all traces in NMI context.
 */
static atomic_t in_nmi;

void ftrace_nmi_enter(void)
{
        atomic_inc(&in_nmi);
}

void ftrace_nmi_exit(void)
{
        atomic_dec(&in_nmi);
}

/* Add a function return address to the trace stack on thread info. */
static int push_return_trace(unsigned long ret, unsigned long long time,
                             unsigned long func)
{
        int index;
        struct thread_info *ti = current_thread_info();

        /* The return trace stack is full */
        if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
                return -EBUSY;

        index = ++ti->curr_ret_stack;
        ti->ret_stack[index].ret = ret;
        ti->ret_stack[index].func = func;
        ti->ret_stack[index].calltime = time;

        return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void pop_return_trace(unsigned long *ret, unsigned long long *time,
                             unsigned long *func)
{
        int index;
        struct thread_info *ti = current_thread_info();

        index = ti->curr_ret_stack;
        *ret = ti->ret_stack[index].ret;
        *func = ti->ret_stack[index].func;
        *time = ti->ret_stack[index].calltime;
        ti->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(void)
{
        struct ftrace_retfunc trace;

        pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
        trace.rettime = cpu_clock(raw_smp_processor_id());
        ftrace_function_return(&trace);

        return trace.ret;
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
        unsigned long old;
        unsigned long long calltime;
        int faulted;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;

        /* NMIs are currently unsupported */
        if (atomic_read(&in_nmi))
                return;

        /*
         * Protect against a fault, even if it shouldn't
         * happen. This tool is too intrusive to
         * ignore such a protection.
         */
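        /*
         * The asm below reads the saved return address from *parent and
         * replaces it with the address of return_to_handler, so the traced
         * function "returns" into the tracer. If either memory access
         * faults, the .fixup path sets 'faulted' to 1 instead of oopsing.
         */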
        asm volatile(
                "1: movl (%[parent_old]), %[old]\n"
                "2: movl %[return_hooker], (%[parent_replaced])\n"
                "   movl $0, %[faulted]\n"

                ".section .fixup, \"ax\"\n"
                "3: movl $1, %[faulted]\n"
                ".previous\n"

                ".section __ex_table, \"a\"\n"
                "   .long 1b, 3b\n"
                "   .long 2b, 3b\n"
                ".previous\n"

                : [parent_replaced] "=r" (parent), [old] "=r" (old),
                  [faulted] "=r" (faulted)
                : [parent_old] "0" (parent), [return_hooker] "r" (return_hooker)
                : "memory"
        );

        if (WARN_ON(faulted)) {
                unregister_ftrace_return();
                return;
        }

        if (WARN_ON(!__kernel_text_address(old))) {
                unregister_ftrace_return();
                *parent = old;
                return;
        }

        calltime = cpu_clock(raw_smp_processor_id());

        if (push_return_trace(old, calltime, self_addr) == -EBUSY)
                *parent = old;
}

#endif

#ifdef CONFIG_DYNAMIC_FTRACE
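
/*
 * On x86 the mcount call site is a 5-byte near call: the 0xe8 opcode
 * followed by a 32-bit offset relative to the next instruction. This
 * union lets the same 5 bytes be viewed either as a raw byte buffer or
 * as the opcode/offset pair.
 */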
union ftrace_code_union {
        char code[MCOUNT_INSN_SIZE];
        struct {
                char e8;
                int offset;
        } __attribute__((packed));
};

static int ftrace_calc_offset(long ip, long addr)
{
        return (int)(addr - ip);
}
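
/*
 * Build the "call <addr>" instruction for the call site at 'ip'. The
 * offset is computed from the end of the 5-byte instruction, which is
 * where a relative call is taken from.
 */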
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
        static union ftrace_code_union calc;

        calc.e8     = 0xe8;
        calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);

        /*
         * No locking needed, this must be called via kstop_machine
         * which in essence is like running on a uniprocessor machine.
         */
        return calc.code;
}

/*
 * Modifying code must take extra care. On an SMP machine, if
 * the code being modified is also being executed on another CPU
 * that CPU will have undefined results and possibly take a GPF.
 * We use kstop_machine to stop other CPUs from executing code.
 * But this does not stop NMIs from happening. We still need
 * to protect against that. We separate out the modification of
 * the code to take care of this.
 *
 * Two buffers are added: An IP buffer and a "code" buffer.
 *
 * 1) Put the instruction pointer into the IP buffer
 *    and the new code into the "code" buffer.
 * 2) Set a flag that says we are modifying code
 * 3) Wait for any running NMIs to finish.
 * 4) Write the code
 * 5) clear the flag.
 * 6) Wait for any running NMIs to finish.
 *
 * If an NMI is executed, the first thing it does is to call
 * "ftrace_nmi_enter". This will check if the flag is set to write
 * and if it is, it will write what is in the IP and "code" buffers.
 *
 * The trick is, it does not matter if everyone is writing the same
 * content to the code location. Also, if a CPU is executing code
 * it is OK to write to that code location if the contents being written
 * are the same as what exists.
 */
static atomic_t in_nmi = ATOMIC_INIT(0);
static int mod_code_status;             /* holds return value of text write */
static int mod_code_write;              /* set when NMI should do the write */
static void *mod_code_ip;               /* holds the IP to write to */
static void *mod_code_newcode;          /* holds the text to write to the IP */

static unsigned nmi_wait_count;
static atomic_t nmi_update_count = ATOMIC_INIT(0);
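
/*
 * Report how many times an updater had to wait for NMIs and how many
 * times an NMI performed the code write itself; the generic tracing
 * code reads this string for its dynamic ftrace statistics.
 */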
int ftrace_arch_read_dyn_info(char *buf, int size)
{
        int r;

        r = snprintf(buf, size, "%u %u",
                     nmi_wait_count,
                     atomic_read(&nmi_update_count));
        return r;
}

static void ftrace_mod_code(void)
{
        /*
         * Yes, more than one CPU process can be writing to mod_code_status.
         * (and the code itself)
         * But if one were to fail, then they all should, and if one were
         * to succeed, then they all should.
         */
        mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
                                             MCOUNT_INSN_SIZE);
}

void ftrace_nmi_enter(void)
{
        atomic_inc(&in_nmi);
        /* Must have in_nmi seen before reading write flag */
        smp_mb();
        if (mod_code_write) {
                ftrace_mod_code();
                atomic_inc(&nmi_update_count);
        }
}

void ftrace_nmi_exit(void)
{
        /* Finish all executions before clearing in_nmi */
        smp_wmb();
        atomic_dec(&in_nmi);
}
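
/*
 * Spin until every CPU that is currently in NMI context has left it.
 * do_ftrace_mod_code() uses this on both sides of the text write, as
 * laid out in the protocol comment above.
 */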
static void wait_for_nmi(void)
{
        int waited = 0;

        while (atomic_read(&in_nmi)) {
                waited = 1;
                cpu_relax();
        }

        if (waited)
                nmi_wait_count++;
}
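
/*
 * Publish the target address and replacement bytes, then perform the
 * write with the NMI handshake described above: set the flag, wait out
 * in-flight NMIs, write the code, clear the flag, and wait again.
 */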
static int
do_ftrace_mod_code(unsigned long ip, void *new_code)
{
        mod_code_ip = (void *)ip;
        mod_code_newcode = new_code;

        /* The buffers need to be visible before we let NMIs write them */
        smp_wmb();

        mod_code_write = 1;

        /* Make sure write bit is visible before we wait on NMIs */
        smp_mb();

        wait_for_nmi();

        /* Make sure all running NMIs have finished before we write the code */
        smp_mb();

        ftrace_mod_code();

        /* Make sure the write happens before clearing the bit */
        smp_wmb();

        mod_code_write = 0;

        /* make sure NMIs see the cleared bit */
        smp_mb();

        wait_for_nmi();

        return mod_code_status;
}

static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
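
/*
 * Return the 5-byte nop selected by ftrace_dyn_arch_init() below; it is
 * what gets patched over an mcount call site when tracing is disabled.
 */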
unsigned char *ftrace_nop_replace(void)
{
        return ftrace_nop;
}

int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                   unsigned char *new_code)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: Due to modules and __init, code can
         *  disappear and change, we need to protect against faulting
         *  as well as code changing. We do this by using the
         *  probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
                return -EINVAL;

        /* replace the text with the new text */
        if (do_ftrace_mod_code(ip, new_code))
                return -EPERM;

        sync_core();

        return 0;
}
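
/*
 * Redirect the call at the ftrace_call site (in the mcount trampoline)
 * so that it calls 'func', the currently registered tracer callback.
 */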
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned char old[MCOUNT_INSN_SIZE], *new;
        int ret;

        memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
        new = ftrace_call_replace(ip, (unsigned long)func);
        ret = ftrace_modify_code(ip, old, new);

        return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
        extern const unsigned char ftrace_test_p6nop[];
        extern const unsigned char ftrace_test_nop5[];
        extern const unsigned char ftrace_test_jmp[];
        int faulted = 0;

        /*
         * There is no good nop for all x86 archs.
         * We will default to using the P6_NOP5, but first we
         * will test to make sure that the nop will actually
         * work on this CPU. If it faults, we will then
         * go to a less efficient 5 byte nop. If that fails
         * we then just use a jmp as our nop. This isn't the most
         * efficient nop, but we cannot use a multi-part nop
         * since we would then risk being preempted in the middle
         * of that nop, and if we enabled tracing then, it might
         * cause a system crash.
         *
         * TODO: check the cpuid to determine the best nop.
         */
        asm volatile (
                "ftrace_test_jmp:"
                "jmp ftrace_test_p6nop\n"
                "nop\n"
                "nop\n"
                "nop\n"  /* 2 byte jmp + 3 bytes */
                "ftrace_test_p6nop:"
                P6_NOP5
                "jmp 1f\n"
                "ftrace_test_nop5:"
                ".byte 0x66,0x66,0x66,0x66,0x90\n"
                "1:"
                ".section .fixup, \"ax\"\n"
                "2: movl $1, %0\n"
                "   jmp ftrace_test_nop5\n"
                "3: movl $2, %0\n"
                "   jmp 1b\n"
                ".previous\n"
                _ASM_EXTABLE(ftrace_test_p6nop, 2b)
                _ASM_EXTABLE(ftrace_test_nop5, 3b)
                : "=r"(faulted) : "0" (faulted));
        switch (faulted) {
        case 0:
                pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
                memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
                break;
        case 1:
                pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
                memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
                break;
        case 2:
                pr_info("ftrace: converting mcount calls to jmp . + 5\n");
                memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
                break;
        }

        /* The return code is returned via data */
        *(unsigned long *)data = 0;

        return 0;
}

#endif