/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 *
 */

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/lowcore.h>

#ifdef CONFIG_DYNAMIC_FTRACE

void ftrace_disable_code(void);
void ftrace_disable_return(void);
void ftrace_call_code(void);
void ftrace_nop_code(void);

#define FTRACE_INSN_SIZE 4
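
/*
 * The labels below are not real functions but short code templates:
 * ftrace_modify_code() compares call sites against them and copies their
 * bytes over the call sites. They are declared as functions above only so
 * that their addresses can be taken from C. FTRACE_INSN_SIZE is the number
 * of bytes rewritten at a call site when tracing is toggled.
 */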
#ifdef CONFIG_64BIT

asm(
	" .align 4\n"
	"ftrace_disable_code:\n"
	" j 0f\n"
	" .word 0x0024\n"
	" lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	" basr %r14,%r1\n"
	"ftrace_disable_return:\n"
	" lg %r14,8(15)\n"
	" lgr %r0,%r0\n"
	"0:\n");

asm(
	" .align 4\n"
	"ftrace_nop_code:\n"
	" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	" .align 4\n"
	"ftrace_call_code:\n"
	" stg %r14,8(%r15)\n");

#else /* CONFIG_64BIT */
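
/*
 * The "bcr 0,%r7" instructions below are 2-byte no-ops; they presumably pad
 * ftrace_disable_code so that it covers the full MCOUNT_INSN_SIZE of the
 * 31-bit mcount call sequence it gets copied over.
 */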
asm(
	" .align 4\n"
	"ftrace_disable_code:\n"
	" j 0f\n"
	" l %r1,"__stringify(__LC_FTRACE_FUNC)"\n"
	" basr %r14,%r1\n"
	"ftrace_disable_return:\n"
	" l %r14,4(%r15)\n"
	" j 0f\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	" bcr 0,%r7\n"
	"0:\n");

asm(
	" .align 4\n"
	"ftrace_nop_code:\n"
	" j .+"__stringify(MCOUNT_INSN_SIZE)"\n");

asm(
	" .align 4\n"
	"ftrace_call_code:\n"
	" st %r14,4(%r15)\n");

#endif /* CONFIG_64BIT */
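
/*
 * Rewrite new_size bytes at ip with new_code, but only if the old_size
 * bytes currently there still match old_code.
 */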
static int ftrace_modify_code(unsigned long ip,
			      void *old_code, int old_size,
			      void *new_code, int new_size)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/*
	 * Note: Due to modules, code can disappear and change.
	 * We need to protect against faulting as well as code
	 * changing. We do this by using the probe_kernel_*
	 * functions.
	 * This however is just a simple sanity check.
	 */
	if (probe_kernel_read(replaced, (void *)ip, old_size))
		return -EFAULT;
	if (memcmp(replaced, old_code, old_size) != 0)
		return -EINVAL;
	if (probe_kernel_write((void *)ip, new_code, new_size))
		return -EPERM;
	return 0;
}
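
/*
 * ftrace_make_nop() is called with addr == MCOUNT_ADDR when a call site is
 * converted for the first time (at boot or module load); in that case the
 * whole mcount call sequence is overwritten with ftrace_disable_code.
 * Later calls only toggle the FTRACE_INSN_SIZE bytes at the site between
 * the jump template (nop) and the store template (call).
 */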
static int ftrace_make_initial_nop(struct module *mod, struct dyn_ftrace *rec,
				   unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_disable_code, MCOUNT_INSN_SIZE);
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	if (addr == MCOUNT_ADDR)
		return ftrace_make_initial_nop(mod, rec, addr);
	return ftrace_modify_code(rec->ip,
				  ftrace_call_code, FTRACE_INSN_SIZE,
				  ftrace_nop_code, FTRACE_INSN_SIZE);
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	return ftrace_modify_code(rec->ip,
				  ftrace_nop_code, FTRACE_INSN_SIZE,
				  ftrace_call_code, FTRACE_INSN_SIZE);
}
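
/*
 * Record the active tracer callback in ftrace_dyn_func; the low-level
 * mcount/ftrace_caller assembly outside this file is what actually
 * consumes this value.
 */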
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_dyn_func = (unsigned long)func;
	return 0;
}
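
/*
 * Nothing architecture-specific to set up; the ftrace core reads a return
 * code back through *data, so store zero to report success.
 */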
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;
	return 0;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Patch the kernel code at the ftrace_graph_caller location:
 * the instruction there is a branch relative on condition. The condition
 * mask is either all ones (always branch, i.e. disable ftrace_graph_caller)
 * or all zeroes (nop, i.e. enable ftrace_graph_caller).
 * Instruction format for brc is a7m4xxxx where m is the condition mask.
 */
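/*
 * Only the first halfword (0xa7, the condition mask and the 0x4 nibble) is
 * rewritten below; the 16-bit relative offset in the second halfword of
 * the brc instruction is left untouched.
 */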
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa704;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned short opcode = 0xa7f4;

	return probe_kernel_write(ftrace_graph_caller, &opcode, sizeof(opcode));
}
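
/*
 * The address handed to the graph tracer is adjusted by a fixed offset:
 * with dynamic ftrace, by the length of the patched call sequence up to
 * ftrace_disable_return; without it, by the constant MCOUNT_OFFSET_RET.
 */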
static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - (ftrace_disable_return - ftrace_disable_code);
}

#else /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_mcount_call_adjust(unsigned long addr)
{
	return addr - MCOUNT_OFFSET_RET;
}

#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
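/*
 * Returns the parent address to use: either the original one (when tracing
 * is skipped) or return_to_handler, which the calling mcount code is
 * expected to install as the new return address so that function exit is
 * traced as well.
 */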
unsigned long prepare_ftrace_return(unsigned long ip, unsigned long parent)
{
	struct ftrace_graph_ent trace;

	/* NMIs are currently unsupported. */
	if (unlikely(in_nmi()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth) == -EBUSY)
		goto out;
	trace.func = ftrace_mcount_call_adjust(ip) & PSW_ADDR_INSN;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		goto out;
	}
	parent = (unsigned long)return_to_handler;
out:
	return parent;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */