trace_functions.c

/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/pstore.h>
#include <linux/fs.h>

#include "trace.h"
/* function tracing enabled */
static int ftrace_function_enabled;

static struct trace_array *func_trace;

static void tracing_start_function_trace(void);
static void tracing_stop_function_trace(void);
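/*
 * tracer ->init callback: note the CPU the trace was started on and
 * turn on cmdline recording plus the function callback.
 */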
static int function_trace_init(struct trace_array *tr)
{
	func_trace = tr;
	tr->cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace();
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace();
	tracing_stop_cmdline_record();
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
}
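/*
 * Like function_trace_call() below, but only disables preemption
 * (not interrupts) around the record; selected when the
 * TRACE_ITER_PREEMPTONLY trace flag is set.
 */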
static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();
	local_save_flags(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, pc);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
/* Our two options */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
	TRACE_FUNC_OPT_PSTORE	= 0x2,
};

static struct tracer_flags func_flags;
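/*
 * Default callback: records the function entry into the ring buffer,
 * mirroring it to pstore first when the func_pstore option is set.
 */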
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		/*
		 * So far tracing doesn't support multiple buffers, so
		 * we make an explicit call for now.
		 */
		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
			pstore_ftrace_call(ip, parent_ip);
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
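/*
 * Same as function_trace_call(), but also records a stack trace for
 * every function entry; selected by the func_stack_trace option.
 */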
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = func_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!ftrace_function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = function_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops trace_stack_ops __read_mostly =
{
	.func = function_stack_trace_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
#ifdef CONFIG_PSTORE_FTRACE
	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};
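/*
 * Pick the callback variant based on the current trace flags, then
 * register whichever ftrace_ops matches the stack-trace option.
 */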
static void tracing_start_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (trace_flags & TRACE_ITER_PREEMPTONLY)
		trace_ops.func = function_trace_call_preempt_only;
	else
		trace_ops.func = function_trace_call;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		register_ftrace_function(&trace_stack_ops);
	else
		register_ftrace_function(&trace_ops);

	ftrace_function_enabled = 1;
}

static void tracing_stop_function_trace(void)
{
	ftrace_function_enabled = 0;

	if (func_flags.val & TRACE_FUNC_OPT_STACK)
		unregister_ftrace_function(&trace_stack_ops);
	else
		unregister_ftrace_function(&trace_ops);
}
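/*
 * Toggling func_stack_trace while the tracer is running means
 * swapping which ftrace_ops is registered with ftrace.
 */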
static int func_set_flag(u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		if (set) {
			unregister_ftrace_function(&trace_ops);
			register_ftrace_function(&trace_stack_ops);
		} else {
			unregister_ftrace_function(&trace_stack_ops);
			register_ftrace_function(&trace_ops);
		}

		break;
	case TRACE_FUNC_OPT_PSTORE:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static struct tracer function_trace __read_mostly =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.wait_pipe	= poll_wait_pipe,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};
#ifdef CONFIG_DYNAMIC_FTRACE
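/*
 * traceon/traceoff probes: *data counts down how many more times the
 * probe may fire; -1 means unlimited.
 */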
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_on();
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
	long *count = (long *)data;

	if (!tracing_is_on())
		return;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_off();
}
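/* Shows a registered probe when set_ftrace_filter is read. */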
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data);

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_trace_onoff_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_trace_onoff_print,
};

static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	if (ops == &traceon_probe_ops)
		seq_printf(m, "traceon");
	else
		seq_printf(m, "traceoff");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param)
{
	struct ftrace_probe_ops *ops;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	unregister_ftrace_function_probe_func(glob, ops);

	return 0;
}
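/*
 * Parses "<glob>:traceon[:count]" or "<glob>:traceoff[:count]" written
 * to set_ftrace_filter, e.g.:
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 *	echo '!schedule:traceoff' > set_ftrace_filter	(remove it again)
 *
 * Without a count the probe fires an unlimited number of times.
 */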
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return ftrace_trace_onoff_unreg(glob+1, cmd, param);

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = &traceon_probe_ops;
	else
		ops = &traceoff_probe_ops;

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = strict_strtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	return ret < 0 ? ret : 0;
}
static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		unregister_ftrace_command(&ftrace_traceoff_cmd);
	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
static __init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}

device_initcall(init_function_trace);