/* kernel/trace/trace_functions.c */
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
  12. #include <linux/debugfs.h>
  13. #include <linux/uaccess.h>
  14. #include <linux/ftrace.h>
  15. #include <linux/fs.h>
  16. #include "trace.h"
/*
 * The trace_array currently being traced; set by start_function_trace()
 * and read by the function-entry callbacks below.
 */
static struct trace_array *func_trace;
/*
 * Enable function tracing on @tr: publish the array for the trace
 * callbacks, discard stale per-cpu buffer contents, then turn on
 * cmdline recording and the function-entry hook (in that order;
 * stop_function_trace() tears down in reverse).
 */
static void start_function_trace(struct trace_array *tr)
{
	func_trace = tr;
	/* get_cpu() disables preemption across the buffer reset */
	tr->cpu = get_cpu();
	tracing_reset_online_cpus(tr);
	put_cpu();
	tracing_start_cmdline_record();
	tracing_start_function_trace();
}
/*
 * Disable function tracing: undo start_function_trace() in reverse
 * order -- the function hook first, then cmdline recording.
 * @tr is unused; the signature mirrors start_function_trace().
 */
static void stop_function_trace(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}
/* tracer ->init callback: begin tracing on @tr; cannot fail, returns 0 */
static int function_trace_init(struct trace_array *tr)
{
	start_function_trace(tr);
	return 0;
}
/* tracer ->reset callback: stop tracing for @tr */
static void function_trace_reset(struct trace_array *tr)
{
	stop_function_trace(tr);
}
/* tracer ->start callback: drop stale events on all online cpus */
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
  45. static void
  46. function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
  47. {
  48. struct trace_array *tr = func_trace;
  49. struct trace_array_cpu *data;
  50. unsigned long flags;
  51. long disabled;
  52. int cpu, resched;
  53. int pc;
  54. if (unlikely(!ftrace_function_enabled))
  55. return;
  56. pc = preempt_count();
  57. resched = ftrace_preempt_disable();
  58. local_save_flags(flags);
  59. cpu = raw_smp_processor_id();
  60. data = tr->data[cpu];
  61. disabled = atomic_inc_return(&data->disabled);
  62. if (likely(disabled == 1))
  63. trace_function(tr, data, ip, parent_ip, flags, pc);
  64. atomic_dec(&data->disabled);
  65. ftrace_preempt_enable(resched);
  66. }
  67. static void
  68. function_trace_call(unsigned long ip, unsigned long parent_ip)
  69. {
  70. struct trace_array *tr = func_trace;
  71. struct trace_array_cpu *data;
  72. unsigned long flags;
  73. long disabled;
  74. int cpu;
  75. int pc;
  76. if (unlikely(!ftrace_function_enabled))
  77. return;
  78. /*
  79. * Need to use raw, since this must be called before the
  80. * recursive protection is performed.
  81. */
  82. local_irq_save(flags);
  83. cpu = raw_smp_processor_id();
  84. data = tr->data[cpu];
  85. disabled = atomic_inc_return(&data->disabled);
  86. if (likely(disabled == 1)) {
  87. pc = preempt_count();
  88. trace_function(tr, data, ip, parent_ip, flags, pc);
  89. }
  90. atomic_dec(&data->disabled);
  91. local_irq_restore(flags);
  92. }
  93. static void
  94. function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
  95. {
  96. struct trace_array *tr = func_trace;
  97. struct trace_array_cpu *data;
  98. unsigned long flags;
  99. long disabled;
  100. int cpu;
  101. int pc;
  102. if (unlikely(!ftrace_function_enabled))
  103. return;
  104. /*
  105. * Need to use raw, since this must be called before the
  106. * recursive protection is performed.
  107. */
  108. local_irq_save(flags);
  109. cpu = raw_smp_processor_id();
  110. data = tr->data[cpu];
  111. disabled = atomic_inc_return(&data->disabled);
  112. if (likely(disabled == 1)) {
  113. pc = preempt_count();
  114. /*
  115. * skip over 5 funcs:
  116. * __ftrace_trace_stack,
  117. * __trace_stack,
  118. * function_stack_trace_call
  119. * ftrace_list_func
  120. * ftrace_call
  121. */
  122. __trace_stack(tr, data, flags, 5, pc);
  123. }
  124. atomic_dec(&data->disabled);
  125. local_irq_restore(flags);
  126. }
/*
 * ftrace_ops for the plain function tracer; ->func is switched between
 * the irq-off and preempt-only callbacks by tracing_start_function_trace().
 */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};
/*
 * Hook the chosen function-entry callback into ftrace.
 * ftrace_function_enabled is cleared first so a callback firing during
 * registration bails out at its enabled check; it is set to 1 only once
 * the correct trace function is installed.
 */
void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;
	/* honor the preempt-only trace option when picking the callback */
	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;
	register_ftrace_function(&trace_ops);
	ftrace_function_enabled = 1;
}
/*
 * Unhook the function-entry callback.  The enable flag is cleared
 * before unregistering so in-flight callbacks stop recording at once.
 */
void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}
/*
 * ftrace_ops registered in addition to trace_ops when the
 * func_stack_trace option is enabled (see func_set_flag()).
 */
static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};
/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,	/* emit a stack trace per function call */
};
/* Option table exposed to user space; terminated by an empty entry. */
static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};
/* Current state of the options above; consulted by func_set_flag(). */
static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
  164. static int func_set_flag(u32 old_flags, u32 bit, int set)
  165. {
  166. if (bit == TRACE_FUNC_OPT_STACK) {
  167. /* do nothing if already set */
  168. if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
  169. return 0;
  170. if (set)
  171. register_ftrace_function(&trace_stack_ops);
  172. else
  173. unregister_ftrace_function(&trace_stack_ops);
  174. return 0;
  175. }
  176. return -EINVAL;
  177. }
/*
 * The "function" tracer definition handed to the tracing core:
 * ->init starts tracing, ->reset stops it, ->start resets the buffers,
 * and ->set_flag handles the func_stack_trace option.
 */
static struct tracer function_trace __read_mostly =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};
/* Register the "function" tracer with the tracing core at boot. */
static __init int init_function_trace(void)
{
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);