trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

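/*
 * Reset the per-cpu buffers for this trace array, then turn on both
 * cmdline recording and the function entry callback.
 */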
static void start_function_trace(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	tracing_reset_online_cpus(tr);
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
}

static void stop_function_trace(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

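/* Tracer hooks invoked by the core tracing code when the "function"
 * tracer is selected, reset, or restarted. */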
static int function_trace_init(struct trace_array *tr)
{
	start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	stop_function_trace(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

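/*
 * Function entry callback used when the preempt-only trace flag is set:
 * only preemption (not interrupts) is disabled around the per-cpu
 * "disabled" check and the ring buffer write.
 */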
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

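/*
 * Default function entry callback: it runs with interrupts disabled and
 * uses the per-cpu "disabled" counter so that a nested entry on the same
 * cpu skips the ring buffer write.
 */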
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

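/*
 * Like function_trace_call(), but also records a stack trace for each
 * function entry.  Selected via the func_stack_trace option.
 */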
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, data, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *   __ftrace_trace_stack,
		 *   __trace_stack,
		 *   function_stack_trace_call
		 *   ftrace_list_func
		 *   ftrace_call
		 */
		__trace_stack(tr, data, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

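/* The ftrace_ops entries registered with the function tracing core */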
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

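/*
 * Pick the callback that matches the current trace flags and options,
 * then register it.  ftrace_function_enabled gates the callbacks while
 * registration is in progress.
 */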
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;
	/* OK if they are not registered */
	unregister_ftrace_function(&trace_stack_ops);
	unregister_ftrace_function(&trace_ops);
}

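/*
 * Toggle the func_stack_trace option at run time by swapping which
 * ftrace_ops is registered.
 */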
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

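/* The "function" tracer definition, registered at boot below */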
static struct tracer function_trace __read_mostly =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.flags = &func_flags,
	.set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};

static __init int init_function_trace(void)
{
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);