trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/pstore.h>
#include <linux/fs.h>

#include "trace.h"

/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);

static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}

static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
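
/*
 * The atomic_inc_return()/atomic_dec() pair on data->disabled above is
 * the per-CPU recursion guard used by every handler in this file: only
 * the first entry on a given CPU (disabled == 1) records an event, so
 * if the tracing path itself triggers the function callback we do not
 * recurse into the ring buffer.
 */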

/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK = 0x1,
	TRACE_FUNC_OPT_PSTORE = 0x2,
};

static struct tracer_flags func_flags;

static void
function_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		/*
		 * So far tracing doesn't support multiple buffers, so
		 * we make an explicit call for now.
		 */
		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
			pstore_ftrace_call(ip, parent_ip);
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
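
/*
 * With func_pstore set, every traced call is mirrored into persistent
 * storage via pstore_ftrace_call(), so the tail of the function trace
 * survives a crash. A rough usage sketch, assuming debugfs is mounted
 * at the usual /sys/kernel/debug:
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   echo 1 > /sys/kernel/debug/tracing/options/func_pstore
 */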

static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
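
/*
 * The stack-dumping variant above is selected through the
 * func_stack_trace option (see func_set_flag() below); a sketch,
 * assuming the usual debugfs mount point:
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   echo 1 > /sys/kernel/debug/tracing/options/func_stack_trace
 */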

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
#ifdef CONFIG_PSTORE_FTRACE
	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
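
/*
 * Each TRACER_OPT entry above shows up as a boolean file under
 * tracing/options/ while this tracer is current; writes to those files
 * arrive in func_set_flag() below with the matching bit.
 */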

static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}

static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	case TRACE_FUNC_OPT_PSTORE:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __read_mostly =
{
	.name = "function",
	.init = function_trace_init,
	.reset = function_trace_reset,
	.start = function_trace_start,
	.wait_pipe = poll_wait_pipe,
	.flags = &func_flags,
	.set_flag = func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_function,
#endif
};
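
/*
 * Typical use of the tracer itself, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   echo function > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/trace
 */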

#ifdef CONFIG_DYNAMIC_FTRACE
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}
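
/*
 * For both probes a count of -1 means "unlimited". A positive count is
 * decremented only when the probe actually flips the tracing state
 * (the early tracing_is_on() checks return before the count is
 * touched), and once it reaches zero the probe becomes inert.
 */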

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func = ftrace_traceon,
	.print = ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func = ftrace_traceoff,
	.print = ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}

static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
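
/*
 * These callbacks implement the traceon/traceoff commands written to
 * set_ftrace_filter. For example, to stop the ring buffer the first
 * three times kfree() is hit (assuming the usual debugfs mount):
 *
 *   echo 'kfree:traceoff:3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * and a leading '!' ('!kfree:traceoff') removes the probe again.
 */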

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name = "traceon",
	.func = ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name = "traceoff",
	.func = ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);