ftrace.c

/*
 * Copyright (C) 2008 Matt Fleming <matt@console-pimps.org>
 * Copyright (C) 2008 Paul Mundt <lethal@linux-sh.org>
 *
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea,
 * Mathieu Desnoyers, for suggesting postponing the modifications,
 * and Arjan van de Ven, for keeping me straight and explaining
 * the dangers of modifying code at runtime.
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/unistd.h>
#include <trace/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned char ftrace_replaced_code[MCOUNT_INSN_SIZE];

static unsigned char ftrace_nop[4];

/*
 * If we're trying to nop out a call to a function, we instead
 * place a call to the address after the memory table.
 *
 * 8c011060 <a>:
 * 8c011060:	02 d1	mov.l	8c01106c <a+0xc>,r1
 * 8c011062:	22 4f	sts.l	pr,@-r15
 * 8c011064:	02 c7	mova	8c011070 <a+0x10>,r0
 * 8c011066:	2b 41	jmp	@r1
 * 8c011068:	2a 40	lds	r0,pr
 * 8c01106a:	09 00	nop
 * 8c01106c:	68 24	.word 0x2468	<--- ip
 * 8c01106e:	1d 8c	.word 0x8c1d
 * 8c011070:	26 4f	lds.l	@r15+,pr	<--- ip + MCOUNT_INSN_SIZE
 *
 * We write 0x8c011070 to 0x8c01106c so that on entry to a() we branch
 * past the _mcount call and continue executing code like normal.
 */
static unsigned char *ftrace_nop_replace(unsigned long ip)
{
	__raw_writel(ip + MCOUNT_INSN_SIZE, ftrace_nop);
	return ftrace_nop;
}

static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	/* Place the address in the memory table. */
	__raw_writel(addr, ftrace_replaced_code);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return ftrace_replaced_code;
}

static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
			      unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as code
	 * changing. We do this by using the probe_kernel_* functions.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine, or before SMP starts.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* replace the text with the new text */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);

	return 0;
}
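
/*
 * Patch the ftrace_call site inside the ftrace_caller trampoline to
 * call @func. MCOUNT_INSN_OFFSET is the offset from the ftrace_call
 * label to the word that holds the call destination.
 */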
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call) + MCOUNT_INSN_OFFSET;
	unsigned char old[MCOUNT_INSN_SIZE], *new;

	memcpy(old, (unsigned char *)ip, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(ip, (unsigned long)func);

	return ftrace_modify_code(ip, old, new);
}
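
/*
 * Convert the mcount call site at rec->ip to a "nop": expect the old
 * call destination @addr at ip and replace it with the skip address
 * produced by ftrace_nop_replace().
 */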
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace(ip);

	return ftrace_modify_code(rec->ip, old, new);
}
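
/*
 * The inverse of ftrace_make_nop(): expect the skip-address nop at
 * rec->ip and replace it with a call to @addr.
 */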
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned char *new, *old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(ip);
	new = ftrace_call_replace(ip, addr);

	return ftrace_modify_code(rec->ip, old, new);
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	__raw_writel(0, (unsigned long)data);

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
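
/*
 * Swap one address in a literal pool for another: read the word at
 * @ip, check that it currently holds @old_addr, and overwrite it
 * with @new_addr.
 */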
static int ftrace_mod(unsigned long ip, unsigned long old_addr,
		      unsigned long new_addr)
{
	unsigned char code[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (old_addr != __raw_readl((unsigned long *)code))
		return -EINVAL;

	__raw_writel(new_addr, ip);
	return 0;
}
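
/*
 * The function graph tracer is enabled and disabled by rewriting the
 * word after the ftrace_graph_call site so that it points at either
 * ftrace_graph_caller or skip_trace.
 */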
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&skip_trace);
	new_addr = (unsigned long)(&ftrace_graph_caller);

	return ftrace_mod(ip, old_addr, new_addr);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip, old_addr, new_addr;

	ip = (unsigned long)(&ftrace_graph_call) + GRAPH_INSN_OFFSET;
	old_addr = (unsigned long)(&ftrace_graph_caller);
	new_addr = (unsigned long)(&skip_trace);

	return ftrace_mod(ip, old_addr, new_addr);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 *
 * This is the main routine for the function graph tracer. The function
 * graph tracer essentially works like this:
 *
 * parent is the stack address containing self_addr's return address.
 * We pull the real return address out of parent and store it in
 * current's ret_stack. Then, we replace the return address on the stack
 * with the address of return_to_handler. self_addr is the function that
 * called mcount.
 *
 * When self_addr returns, it will jump to return_to_handler which calls
 * ftrace_return_to_handler. ftrace_return_to_handler will pull the real
 * return address off of current's ret_stack and jump to it.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted, err;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
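	/*
	 * Label 1 loads the real return address from @parent and label 2
	 * replaces it with return_hooker. If either access faults, the
	 * fixup at label 4 sets faulted to 1 and resumes at label 3.
	 */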
	__asm__ __volatile__(
		"1:					\n\t"
		"mov.l	@%2, %0				\n\t"
		"2:					\n\t"
		"mov.l	%3, @%2				\n\t"
		"mov	#0, %1				\n\t"
		"3:					\n\t"
		".section .fixup, \"ax\"		\n\t"
		"4:					\n\t"
		"mov.l	5f, %0				\n\t"
		"jmp	@%0				\n\t"
		" mov	#1, %1				\n\t"
		".balign 4				\n\t"
		"5:	.long 3b			\n\t"
		".previous				\n\t"
		".section __ex_table,\"a\"		\n\t"
		".long 1b, 4b				\n\t"
		".long 2b, 4b				\n\t"
		".previous				\n\t"
		: "=&r" (old), "=r" (faulted)
		: "r" (parent), "r" (return_hooker)
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
	if (err == -EBUSY) {
		__raw_writel(old, parent);
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		__raw_writel(old, parent);
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned long *sys_call_table;

static struct syscall_metadata **syscalls_metadata;
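
/*
 * Look up the symbol name for @syscall via kallsyms and scan the
 * syscall metadata section for an entry whose name matches it.
 */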
static struct syscall_metadata *find_syscall_meta(unsigned long *syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup((unsigned long) syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name, str))
			return start;
	}

	return NULL;
}

struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= FTRACE_SYSCALL_MAX || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}
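
/* Map a syscall name back to its number by scanning the metadata table. */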
int syscall_name_to_nr(char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;

	for (i = 0; i < NR_syscalls; i++)
		if (syscalls_metadata[i])
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;

	return -1;
}
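
/*
 * Record the event ids the tracing core assigns to this syscall's
 * enter/exit events.
 */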
void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}
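
/*
 * Populate the nr -> metadata table at boot. Note that sys_call_table
 * is declared above as a pointer although the symbol is the table
 * itself, so &sys_call_table points at the first entry and indexing
 * through it walks the table.
 */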
static int __init arch_init_ftrace_syscalls(void)
{
	int i;
	struct syscall_metadata *meta;
	unsigned long **psys_syscall_table = &sys_call_table;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					FTRACE_SYSCALL_MAX, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
		meta = find_syscall_meta(psys_syscall_table[i]);
		syscalls_metadata[i] = meta;
	}

	return 0;
}
arch_initcall(arch_init_ftrace_syscalls);
#endif /* CONFIG_FTRACE_SYSCALLS */