/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

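/*
 * Set up the "function" tracer: remember the trace_array we were
 * handed, note the CPU the tracer was started on, and enable both
 * cmdline recording and the function entry hook.
 */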
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

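/* Tear down in the reverse order of function_trace_init(). */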
static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

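/* Restarting the tracer simply resets the per-CPU ring buffers. */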
static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

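/*
 * Callback variant used when TRACE_ITER_PREEMPTONLY is set: it only
 * disables preemption around the trace (interrupts stay enabled), so
 * interrupt handlers that hit while we record can still be traced.
 */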
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu, resched;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	ftrace_preempt_enable(resched);
}

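/*
 * Default callback hooked into every traced function.  Interrupts are
 * disabled for the duration, and the per-CPU "disabled" counter keeps
 * the tracer from recursing into itself.
 */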
static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

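/*
 * Variant used when the func_stack_trace option is set: identical to
 * function_trace_call(), but also records a stack trace after each
 * function entry.
 */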
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

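/* The ftrace_ops that get registered with the function tracing core. */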
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
};

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

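/*
 * Register the appropriate callback: the preempt-only variant if
 * TRACE_ITER_PREEMPTONLY is set, the stack-tracing ops if
 * func_stack_trace is enabled, and the plain callback otherwise.
 */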
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

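/*
 * Toggle the func_stack_trace option at runtime by swapping which
 * ftrace_ops is registered.
 */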
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_FUNC_OPT_STACK) {
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			return 0;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		return 0;
	}

	return -EINVAL;
}

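/* The tracer definition registered with the core under the name "function". */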
static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
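/*
 * Probe callbacks behind the traceon/traceoff commands that can be
 * attached to functions via set_ftrace_filter.  The probe data is a
 * countdown of how many more times the probe may fire; -1 means
 * unlimited.
 */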
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func	= ftrace_traceon,
	.print	= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func	= ftrace_traceoff,
	.print	= ftrace_trace_onoff_print,
};

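/*
 * Show an active probe when set_ftrace_filter is read, e.g.
 * "schedule:traceoff:count=5" or "schedule:traceon:unlimited".
 */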
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	char str[KSYM_SYMBOL_LEN];
	long count = (long)data;

	kallsyms_lookup(ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

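/* Remove a previously installed probe (the "!func:traceon" syntax). */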
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

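/*
 * Parse the "<func>:traceon[:count]" / "<func>:traceoff[:count]"
 * syntax written to set_ftrace_filter and install the matching probe.
 * For example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 'schedule:traceoff:1' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * turns the ring buffer off the first time schedule() is hit.
 */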
static int
ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret;
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name	= "traceon",
	.func	= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name	= "traceoff",
	.func	= ftrace_trace_onoff_callback,
};

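/*
 * Register both commands; if the second registration fails, back out
 * the first so we never leave only half of the pair installed.
 */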
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);