ftrace.c 6.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */
  9. #include <linux/hardirq.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/ftrace.h>
  12. #include <linux/kernel.h>
  13. #include <linux/types.h>
  14. #include <trace/syscall.h>
  15. #include <asm/lowcore.h>
#ifdef CONFIG_DYNAMIC_FTRACE

/*
 * Code templates used by ftrace_modify_code() when patching mcount call
 * sites.  They are declared as functions only so their addresses can be
 * taken; the actual bytes are emitted by the file-scope asm() blobs below.
 */
void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

/* Size in bytes of the single instruction patched at a call site. */
#define FTRACE_INSN_SIZE 4

#ifdef CONFIG_64BIT
/*
 * "Disabled" template: the leading branch jumps straight to label 0,
 * skipping the load of the ftrace function pointer from the lowcore
 * (__LC_FTRACE_FUNC) and the basr call that would invoke it.
 * ftrace_disable_return marks the return path inside the template.
 * NOTE(review): the ".word 0x0024" looks like a filler/alignment word and
 * "lg %r14,8(15)" uses a bare "15" rather than "%r15" — both assemble to
 * the same thing here, but confirm against the matching mcount code.
 */
asm(
	" .align 4\n"
	"ftrace_disable_code:\n"
	" j 0f\n"
	" .word 0x0024\n"
	" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	" basr %r14,%r1\n"
	"ftrace_disable_return:\n"
	" lg %r14,8(15)\n"
	" lgr %r0,%r0\n"
	"0:\n");

/* "nop" template: a relative branch over the whole mcount sequence. */
asm(
	" .align 4\n"
	"ftrace_nop_code:\n"
	" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");

/* "call enabled" template: restore the save of %r14 done by mcount. */
asm(
	" .align 4\n"
	"ftrace_call_code:\n"
	" stg %r14,8(%r15)\n");

#else /* CONFIG_64BIT */
/*
 * 31-bit variant of the "disabled" template.  The trailing "bcr 0,%r7"
 * instructions are nops padding the template to the 31-bit
 * MCOUNT_INSN_SIZE so all templates patch the same number of bytes.
 */
asm(
	" .align 4\n"
	"ftrace_disable_code:\n"
	" j 0f\n"
	" l %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	" basr %r14,%r1\n"
	"ftrace_disable_return:\n"
	" l %r14,4(%r15)\n"
	" j 0f\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	"0:\n");

/* 31-bit "nop" template: branch over the mcount sequence. */
asm(
	" .align 4\n"
	"ftrace_nop_code:\n"
	" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");

/* 31-bit "call enabled" template: store %r14 as mcount expects. */
asm(
	" .align 4\n"
	"ftrace_call_code:\n"
	" st %r14,4(%r15)\n");
#endif /* CONFIG_64BIT */
  68. static int ftrace_modify_code(unsigned long ip,
  69. void *old_code, int old_size,
  70. void *new_code, int new_size)
  71. {
  72. unsigned char replaced[MCOUNT_INSN_SIZE];
  73. /*
  74. * Note: Due to modules code can disappear and change.
  75. * We need to protect against faulting as well as code
  76. * changing. We do this by using the probe_kernel_*
  77. * functions.
  78. * This however is just a simple sanity check.
  79. */
  80. if (probe_kernel_read(replaced, (void *)ip, old_size))
  81. return -EFAULT;
  82. if (memcmp(replaced, old_code, old_size) != 0)
  83. return -EINVAL;
  84. if (probe_kernel_write((void *)ip, new_code, new_size))
  85. return -EPERM;
  86. return 0;
  87. }
  88. static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
  89. unsigned long addr)
  90. {
  91. return ftrace_modify_code(rec->ip,
  92. ftrace_call_code, FTRACE_INSN_SIZE,
  93. ftrace_disable_code, MCOUNT_INSN_SIZE);
  94. }
  95. int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
  96. unsigned long addr)
  97. {
  98. if (addr == MCOUNT_ADDR)
  99. return ftrace_make_initial_nop(mod, rec, addr);
  100. return ftrace_modify_code(rec->ip,
  101. ftrace_call_code, FTRACE_INSN_SIZE,
  102. ftrace_nop_code, FTRACE_INSN_SIZE);
  103. }
  104. int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
  105. {
  106. return ftrace_modify_code(rec->ip,
  107. ftrace_nop_code, FTRACE_INSN_SIZE,
  108. ftrace_call_code, FTRACE_INSN_SIZE);
  109. }
  110. int ftrace_update_ftrace_func(ftrace_func_t func)
  111. {
  112. ftrace_dyn_func = (unsigned long)func;
  113. return 0;
  114. }
  115. int __init ftrace_dyn_arch_init(void *data)
  116. {
  117. *(unsigned long *)data = 0;
  118. return 0;
  119. }
  120. #endif /* CONFIG_DYNAMIC_FTRACE */
  121. #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  122. #ifdef CONFIG_DYNAMIC_FTRACE
  123. /*
  124. * Patch the kernel code at ftrace_graph_caller location:
  125. * The instruction there is branch relative on condition. The condition mask
  126. * is either all ones (always branch aka disable ftrace_graph_caller) or all
  127. * zeroes (nop aka enable ftrace_graph_caller).
  128. * Instruction format for brc is a7m4xxxx where m is the condition mask.
  129. */
  130. int ftrace_enable_ftrace_graph_caller(void)
  131. {
  132. unsigned short opcode = 0xa704;
  133. return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
  134. }
  135. int ftrace_disable_ftrace_graph_caller(void)
  136. {
  137. unsigned short opcode = 0xa7f4;
  138. return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
  139. }
/*
 * Map the return address seen inside the patched mcount sequence back to
 * the address of the call site itself, by subtracting the offset of
 * ftrace_disable_return within the disable template.
 * NOTE(review): subtracting function pointers is a GNU C extension
 * (byte difference) — fine for kernel builds, not portable ISO C.
 */
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - (ftrace_disable_return - ftrace_disable_code);
}
  144. #else /* CONFIG_DYNAMIC_FTRACE */
/*
 * Non-dynamic variant: the mcount code is fixed, so the distance from
 * the return point to the call site is the constant MCOUNT_OFFSET_RET.
 */
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET_RET;
}
  149. #endif /* CONFIG_DYNAMIC_FTRACE */
  150. /*
  151. * Hook the return address and push it in the stack of return addresses
  152. * in current thread info.
  153. */
  154. unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
  155. {
  156. struct ftrace_graph_ent trace;
  157. if (unlikely(atomic_read(&current->tracing_graph_pause)))
  158. goto out;
  159. if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
  160. goto out;
  161. trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
  162. /* Only trace if the calling function expects to. */
  163. if (!ftrace_graph_entry(&trace)) {
  164. current->curr_ret_stack--;
  165. goto out;
  166. }
  167. parent = (unsigned long)return_to_handler;
  168. out:
  169. return parent;
  170. }
  171. #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  172. #ifdef CONFIG_FTRACE_SYSCALLS
  173. extern unsigned long __start_syscalls_metadata[];
  174. extern unsigned long __stop_syscalls_metadata[];
  175. extern unsigned int sys_call_table[];
  176. static struct syscall_metadata **syscalls_metadata;
  177. struct syscall_metadata *syscall_nr_to_meta(int nr)
  178. {
  179. if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
  180. return NULL;
  181. return syscalls_metadata[nr];
  182. }
  183. int syscall_name_to_nr(char *name)
  184. {
  185. int i;
  186. if (!syscalls_metadata)
  187. return -1;
  188. for (i = 0; i < NR_syscalls; i++)
  189. if (syscalls_metadata[i])
  190. if (!strcmp(syscalls_metadata[i]->name, name))
  191. return i;
  192. return -1;
  193. }
/*
 * Record the trace event id for syscall entry of syscall @num.
 * NOTE(review): no bounds or NULL check — presumably only called by the
 * tracing core after arch_init_ftrace_syscalls() succeeded; verify.
 */
void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}
/*
 * Record the trace event id for syscall exit of syscall @num.
 * NOTE(review): no bounds or NULL check — presumably only called by the
 * tracing core after arch_init_ftrace_syscalls() succeeded; verify.
 */
void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}
  202. static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
  203. {
  204. struct syscall_metadata *start;
  205. struct syscall_metadata *stop;
  206. char str[KSYM_SYMBOL_LEN];
  207. start = (struct syscall_metadata *)__start_syscalls_metadata;
  208. stop = (struct syscall_metadata *)__stop_syscalls_metadata;
  209. kallsyms_lookup(syscall, NULL, NULL, NULL, str);
  210. for ( ; start < stop; start++) {
  211. if (start->name && !strcmp(start->name + 3, str + 3))
  212. return start;
  213. }
  214. return NULL;
  215. }
  216. static int __init arch_init_ftrace_syscalls(void)
  217. {
  218. struct syscall_metadata *meta;
  219. int i;
  220. syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
  221. GFP_KERNEL);
  222. if (!syscalls_metadata)
  223. return -ENOMEM;
  224. for (i = 0; i < NR_syscalls; i++) {
  225. meta = find_syscall_meta((unsigned long)sys_call_table[i]);
  226. syscalls_metadata[i] = meta;
  227. }
  228. return 0;
  229. }
  230. arch_initcall(arch_init_ftrace_syscalls);
  231. #endif