ftrace.c

/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <trace/syscall.h>
#include <asm/lowcore.h>

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE 4

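/*
 * The labels below are instruction templates, not callable functions:
 * ftrace_modify_code() compares the bytes at a call site against one
 * template and overwrites them with another. Only the first
 * FTRACE_INSN_SIZE bytes of ftrace_call_code and ftrace_nop_code are
 * used; ftrace_disable_code covers the full MCOUNT_INSN_SIZE call
 * sequence.
 */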
#ifdef CONFIG_64BIT

asm(
        "       .align  4\n"
        "ftrace_disable_code:\n"
        "       j       0f\n"
        "       .word   0x0024\n"
        "       lg      %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
        "       basr    %r14,%r1\n"
        "ftrace_disable_return:\n"
        "       lg      %r14,8(15)\n"
        "       lgr     %r0,%r0\n"
        "0:\n");

asm(
        "       .align  4\n"
        "ftrace_nop_code:\n"
        "       j       .+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
        "       .align  4\n"
        "ftrace_call_code:\n"
        "       stg     %r14,8(%r15)\n");

#else /* CONFIG_64BIT */

asm(
        "       .align  4\n"
        "ftrace_disable_code:\n"
        "       j       0f\n"
        "       l       %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
        "       basr    %r14,%r1\n"
        "ftrace_disable_return:\n"
        "       l       %r14,4(%r15)\n"
        "       j       0f\n"
        "       bcr     0,%r7\n"
        "       bcr     0,%r7\n"
        "       bcr     0,%r7\n"
        "       bcr     0,%r7\n"
        "       bcr     0,%r7\n"
        "       bcr     0,%r7\n"
        "0:\n");

asm(
        "       .align  4\n"
        "ftrace_nop_code:\n"
        "       j       .+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
        "       .align  4\n"
        "ftrace_call_code:\n"
        "       st      %r14,4(%r15)\n");

#endif /* CONFIG_64BIT */

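/*
 * Replace old_size bytes at ip with new_code, but only if the current
 * contents still match old_code. Returns -EFAULT if the code cannot be
 * read, -EINVAL if it does not match, and -EPERM if the write fails.
 */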
static int ftrace_modify_code(unsigned long ip,
                              void *old_code, int old_size,
                              void *new_code, int new_size)
{
        unsigned char replaced[MCOUNT_INSN_SIZE];

        /*
         * Note: Due to modules, code can disappear and change.
         * We need to protect against faulting as well as code
         * changing. We do this by using the probe_kernel_*
         * functions.
         * This however is just a simple sanity check.
         */
        if (probe_kernel_read(replaced, (void *)ip, old_size))
                return -EFAULT;
        if (memcmp(replaced, old_code, old_size) != 0)
                return -EINVAL;
        if (probe_kernel_write((void *)ip, new_code, new_size))
                return -EPERM;
        return 0;
}

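/*
 * At boot (addr == MCOUNT_ADDR) the compiler-generated mcount call
 * sequence still starts with ftrace_call_code; the whole sequence is
 * replaced by ftrace_disable_code, which branches over the call.
 * Afterwards, disabling and enabling a call site only swaps the first
 * FTRACE_INSN_SIZE bytes between ftrace_nop_code and ftrace_call_code.
 */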
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
                                   unsigned long addr)
{
        return ftrace_modify_code(rec->ip,
                                  ftrace_call_code, FTRACE_INSN_SIZE,
                                  ftrace_disable_code, MCOUNT_INSN_SIZE);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                    unsigned long addr)
{
        if (addr == MCOUNT_ADDR)
                return ftrace_make_initial_nop(mod, rec, addr);
        return ftrace_modify_code(rec->ip,
                                  ftrace_call_code, FTRACE_INSN_SIZE,
                                  ftrace_nop_code, FTRACE_INSN_SIZE);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        return ftrace_modify_code(rec->ip,
                                  ftrace_nop_code, FTRACE_INSN_SIZE,
                                  ftrace_call_code, FTRACE_INSN_SIZE);
}

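/* Switch the tracer callback that traced call sites will end up in. */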
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        ftrace_dyn_func = (unsigned long)func;
        return 0;
}

int __init ftrace_dyn_arch_init(void *data)
{
        *(unsigned long *)data = 0;
        return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at ftrace_graph_caller location:
 * The instruction there is branch relative on condition. The condition mask
 * is either all ones (always branch aka disable ftrace_graph_caller) or all
 * zeroes (nop aka enable ftrace_graph_caller).
 * Instruction format for brc is a7m4xxxx where m is the condition mask.
 */
int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned short opcode = 0xa704;

        return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned short opcode = 0xa7f4;

        return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

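/*
 * Adjust the return address seen by prepare_ftrace_return() back to the
 * start of the mcount call sequence, using the offset of the
 * ftrace_disable_return label within the template, so that trace.func
 * identifies the traced function.
 */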
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
        return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
        return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
        struct ftrace_graph_ent trace;

        /* NMIs are currently unsupported. */
        if (unlikely(in_nmi()))
                goto out;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;
        trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
        /* Only trace if the calling function expects to. */
        if (!ftrace_graph_entry(&trace)) {
                current->curr_ret_stack--;
                goto out;
        }
        parent = (unsigned long)return_to_handler;
out:
        return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern unsigned int sys_call_table[];

static struct syscall_metadata **syscalls_metadata;

struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}

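/*
 * Look up the syscall metadata entry whose name matches the symbol at
 * the given syscall table address. The comparison skips the first three
 * characters so that differently cased prefixes (e.g. "sys_" vs. "SyS_"
 * aliases) still match.
 */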
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata *start;
        struct syscall_metadata *stop;
        char str[KSYM_SYMBOL_LEN];

        start = (struct syscall_metadata *)__start_syscalls_metadata;
        stop = (struct syscall_metadata *)__stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);
        for ( ; start < stop; start++) {
                if (start->name && !strcmp(start->name + 3, str + 3))
                        return start;
        }
        return NULL;
}

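/*
 * Build the syscall number -> metadata table. The atomic reference count
 * ensures the table is only allocated and filled once; every later (or
 * failed) caller just drops its reference again.
 */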
void arch_init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        int i;
        static atomic_t refs;

        if (atomic_inc_return(&refs) != 1)
                goto out;
        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) * NR_syscalls,
                                    GFP_KERNEL);
        if (!syscalls_metadata)
                goto out;
        for (i = 0; i < NR_syscalls; i++) {
                meta = find_syscall_meta((unsigned long)sys_call_table[i]);
                syscalls_metadata[i] = meta;
        }
        return;
out:
        atomic_dec(&refs);
}
#endif