trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int			ftrace_function_enabled;

static struct trace_array	*func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

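/*
 * Tracer ->init callback, invoked when "function" is selected via
 * current_tracer: remember the trace array and the CPU that enabled
 * tracing, start recording of task command lines, and register the
 * function-entry callback with ftrace.
 */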
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

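/*
 * Lighter-weight callback used when the TRACE_ITER_PREEMPTONLY trace
 * option is set: it disables only preemption (not interrupts) around
 * the record.  The per-cpu ->disabled counter keeps the callback from
 * recursing into itself on the same CPU.
 */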
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

static struct tracer_flags func_flags;

static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

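/*
 * Same as function_trace_call(), but additionally records a kernel
 * stack trace with each function entry when the func_stack_trace
 * option is enabled.
 */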
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

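/*
 * Both ops hook into the global ftrace list.  FTRACE_OPS_FL_RECURSION_SAFE
 * tells the ftrace core that these callbacks do their own recursion
 * protection (the per-cpu ->disabled counter above), so the core need
 * not wrap them in its own.
 */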
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

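/*
 * ftrace_function_enabled is cleared before the ops are (re)registered
 * so the callbacks stay quiet while the function pointer and ops are
 * being swapped, and is set again only once registration is complete.
 */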
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

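/*
 * Runtime toggle for the stack-trace option, reached via the
 * trace_options debugfs file, e.g. (assuming debugfs is mounted at
 * the usual /sys/kernel/debug):
 *
 *	echo func_stack_trace > /sys/kernel/debug/tracing/trace_options
 *
 * Flipping the option swaps which ftrace_ops is registered.
 */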
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

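/*
 * With dynamic ftrace, the "traceon" and "traceoff" probes below can
 * be attached to individual functions through set_ftrace_filter.
 * For example:
 *
 *	echo 'schedule:traceoff:5' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * disables the ring buffer the first five times schedule() is hit
 * while tracing is on; without a count the probe fires every time.
 * Note the count only decrements when the probe actually toggles
 * tracing, which is why the handlers bail out early below.
 */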
#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};

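/*
 * Format shown when set_ftrace_filter is read back:
 * "<function>:traceon|traceoff:unlimited", or ":count=<n>" for a
 * probe with a remaining trigger count.
 */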
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);