trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

#define TRACE_GRAPH_INDENT	2
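/*
 * Each level of call depth shifts the function column right by
 * TRACE_GRAPH_INDENT spaces; illustrative output (function names
 * hypothetical):
 *
 *	sys_read() {
 *	  vfs_read() {
 *	    rw_verify_area();
 *	  }
 *	}
 */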
/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4

static struct tracer_opt trace_opts[] = {
	/* Display overruns? */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
	.opts = trace_opts
};
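/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug; the
 * funcgraph-* options above show up in the trace_options file):
 *
 *	# echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	# echo funcgraph-overrun > /sys/kernel/debug/tracing/trace_options
 *	# cat /sys/kernel/debug/tracing/trace
 */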
/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
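/*
 * Init/reset lifecycle: init resets each online CPU's ring buffer, then
 * hooks the trace_graph_entry/trace_graph_return callbacks (not defined
 * in this file) into the function graph infrastructure; reset undoes
 * this in reverse order.
 */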
static int graph_trace_init(struct trace_array *tr)
{
	int cpu, ret;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
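/*
 * Despite the name, this returns the number of decimal digits in @nb
 * (capped at 3), not a logarithm; print_graph_cpu() uses it to
 * right-align CPU ids, e.g. with 16 CPUs online, cpu 7 is padded to
 * " 7)" so the columns line up.
 */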
static inline int log10_cpu(int nb)
{
	if (nb / 100)
		return 3;
	if (nb / 10)
		return 2;
	return 1;
}
static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int i;
	int ret;
	int log10_this = log10_cpu(cpu);
	int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));

	for (i = 0; i < log10_all - log10_this; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	ret = trace_seq_printf(s, "%d) ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* If the pid changed since the last trace, output this event */
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
	char *comm;

	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
		return 1;

	last_pid[cpu] = pid;
	comm = trace_find_cmdline(pid);

	return trace_seq_printf(s, "\n------------8<---------- thread %s-%d"
				    " ------------8<----------\n\n",
				    comm, pid);
}
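/*
 * A "leaf" is an entry event immediately followed in the same CPU
 * buffer by its own return event (same pid, same function), i.e. the
 * function called nothing that was traced in between.  Peeking at the
 * next event without consuming it lets the caller collapse the pair
 * into a single "func();" line instead of "func() {" ... "}".
 */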
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	if (!ring_iter)
		return false;

	event = ring_buffer_iter_peek(ring_iter, NULL);

	if (!event)
		return false;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return false;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return false;

	return true;
}
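/*
 * do_div() divides @duration in place and returns the remainder, so a
 * nanosecond total becomes microseconds plus a nanosecond remainder:
 * e.g. 1234567 ns prints as "1234.567 us".
 */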
static inline int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);

	return trace_seq_printf(s, "%4llu.%03lu us | ", duration, nsecs_rem);
}
/* Signal an execution-time overhead in the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* Duration exceeded 100 usecs */
	if (duration > 100000ULL)
		return trace_seq_printf(s, "! ");

	/* Duration exceeded 10 usecs */
	if (duration > 10000ULL)
		return trace_seq_printf(s, "+ ");

	return trace_seq_printf(s, "  ");
}
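/*
 * Unlike the peek in trace_branch_is_leaf(), ring_buffer_read() below
 * consumes the matching return event, so the entry/return pair is
 * printed exactly once, as one closed call.
 */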
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
	struct ftrace_graph_ret_entry *ret_entry;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	/* Must not exceed 8 characters: 9999.999 us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "();\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
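/*
 * Non-leaf case: the duration is unknown until the matching return
 * event arrives, so the time column is left blank and the call is
 * opened with "func() {"; print_graph_return() later emits the
 * closing brace and the duration.
 */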
static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
			struct trace_seq *s)
{
	int i;
	int ret;
	struct ftrace_graph_ent *call = &entry->graph_ent;

	/* No overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = trace_seq_printf(s, "  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No time */
	ret = trace_seq_printf(s, "            | ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "() {\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, int cpu)
{
	int ret;
	struct trace_entry *ent = iter->ent;

	/* Pid */
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (trace_branch_is_leaf(iter, field))
		return print_graph_entry_leaf(iter, field, s);
	else
		return print_graph_entry_nested(field, s);
}
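/*
 * Example of a return line with the default flags (illustrative
 * spacing):
 *
 *	1) +   23.456 us |    }
 *
 * "+" marks the >10 usec threshold from print_graph_overhead().
 */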
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
{
	int i;
	int ret;
	unsigned long long duration = trace->rettime - trace->calltime;

	/* Must not exceed 8 characters: 9999.999 us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

	/* Pid */
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return TRACE_TYPE_HANDLED;
}
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(field, s, iter,
					 iter->cpu);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter->cpu);
	}
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.flags		= &tracer_flags,
};

static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);