trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

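/*
 * Set up the "function" tracer: remember the trace array and the CPU
 * that enabled it, then start cmdline recording and the function callback.
 */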
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

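/*
 * Tracing callback used when the TRACE_ITER_PREEMPTONLY option is set:
 * only preemption is disabled while the event is recorded, instead of
 * disabling interrupts.
 */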
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

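/*
 * Default tracing callback: record the traced function and its parent
 * with interrupts disabled, using the per-cpu "disabled" counter to
 * guard against recursion.
 */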
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

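/*
 * Same as function_trace_call(), but also record a stack trace for
 * each traced function when the func_stack_trace option is set.
 */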
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

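/*
 * Pick the callback that matches the current trace options, register it
 * with ftrace, then mark function tracing as enabled.
 */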
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

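/* Disable function tracing and unregister whichever callback was active. */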
static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	/* OK if they are not registered */
	unregister_ftrace_function(&trace_stack_ops);
	unregister_ftrace_function(&trace_ops);
}

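/*
 * Toggle the func_stack_trace option at runtime by swapping the
 * registered ftrace callback.
 */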
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

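/* The "function" tracer, registered with the tracing core at boot. */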
static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

static __init int init_function_trace(void)
{
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);