trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
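
/*
 * Remember which CPU the tracer was started on, turn on cmdline
 * recording (so PIDs can be mapped back to comm names), and hook
 * the function callback into the function tracer.
 */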
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
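
/*
 * Variant used when the "preempt-only" trace option is set: it only
 * disables preemption around the record, rather than disabling
 * interrupts, so it is cheaper but can be interrupted mid-record.
 */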
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_flags func_flags;

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
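
/*
 * Same as function_trace_call(), but also records a kernel stack
 * trace for every function entry when the func_stack_trace option
 * is set.
 */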
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
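
/*
 * The two callbacks above are hooked in through these ops; the
 * GLOBAL flag places them on the list that shares the filtering
 * set up via set_ftrace_filter.
 */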
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
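
/*
 * Pick the callback matching the current trace options, then
 * register it.  ftrace_function_enabled stays 0 until the ops are
 * in place, so the callbacks bail out during the transition.
 */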
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}
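
/*
 * Toggle the func_stack_trace option at runtime by swapping which
 * ftrace_ops is registered.
 */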
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
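/*
 * The traceon/traceoff probes switch the ring buffer on or off when
 * a chosen function is hit, e.g.:
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *
 * An optional count limits how many times the probe fires; -1 (the
 * default when no count is given) means unlimited.
 */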
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func		= ftrace_traceon,
	.print		= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func		= ftrace_traceoff,
	.print		= ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}
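
/*
 * Parse "<glob>:traceon[:count]" / "<glob>:traceoff[:count]" written
 * to set_ftrace_filter.  A leading '!' unregisters the probe instead.
 */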
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
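
/*
 * Hook the "traceon" and "traceoff" keywords into the
 * set_ftrace_filter command parser.
 */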
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name		= "traceon",
	.func		= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name		= "traceoff",
	.func		= ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
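
/*
 * Register the traceon/traceoff commands (when dynamic ftrace is
 * available) and the "function" tracer itself at boot.
 */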
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}
device_initcall(init_function_trace);