trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *func_trace;
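
/*
 * Record the current CPU, reset the online-cpu buffers, then enable
 * cmdline recording and the function tracer itself.
 */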
static void start_function_trace(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	tracing_reset_online_cpus(tr);
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
}
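
/* Disable in the reverse order: the tracer first, then cmdline recording. */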
static void stop_function_trace(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}
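
/* Tracer callbacks wired into the struct tracer definition near the end of this file. */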
static int function_trace_init(struct trace_array *tr)
{
	start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	stop_function_trace(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
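
/*
 * Callback used when the TRACE_ITER_PREEMPTONLY flag is set: only preemption
 * is disabled around the record; interrupts stay enabled.
 */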
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}
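
/*
 * Default callback: interrupts are disabled for the whole record and the
 * per-cpu "disabled" counter protects against recursion.
 */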
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
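
/*
 * Same as function_trace_call(), but also records a stack trace; registered
 * when the func_stack_trace option is set.
 */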
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, data, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
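
/*
 * Choose the callback based on the current trace flags and register the
 * matching ftrace_ops (stack-tracing variant if func_stack_trace is set).
 */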
void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	/* OK if they are not registered */
	unregister_ftrace_function(&trace_stack_ops);
	unregister_ftrace_function(&trace_ops);
}
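
/*
 * Toggle the func_stack_trace option at run time by swapping which
 * ftrace_ops is registered.
 */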
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

static __init int init_function_trace(void)
{
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);