trace_functions_graph.c 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400
  1. /*
  2. *
  3. * Function graph tracer.
  4. * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
  5. * Mostly borrowed from function tracer which
  6. * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
  7. *
  8. */
  9. #include <linux/debugfs.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/ftrace.h>
  12. #include <linux/fs.h>
  13. #include "trace.h"
  14. #define TRACE_GRAPH_INDENT 2
  15. /* Flag options */
  16. #define TRACE_GRAPH_PRINT_OVERRUN 0x1
  17. #define TRACE_GRAPH_PRINT_CPU 0x2
  18. #define TRACE_GRAPH_PRINT_OVERHEAD 0x4
/*
 * Per-tracer options, toggled at runtime through the trace_options
 * debugfs file.  Each TRACER_OPT entry pairs a user-visible option
 * name with its flag bit (see the TRACE_GRAPH_PRINT_* defines above).
 */
static struct tracer_opt trace_opts[] = {
	/* Display overruns ? */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	{ } /* Empty entry */
};
/*
 * Current option state: CPU and overhead columns are on by default;
 * overrun reporting is off (its bit is simply absent from .val).
 */
static struct tracer_flags tracer_flags = {
	/* Don't display overruns by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
	.opts = trace_opts
};

/* pid on the last trace processed, per cpu; -1 means none seen yet */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
  35. static int graph_trace_init(struct trace_array *tr)
  36. {
  37. int cpu, ret;
  38. for_each_online_cpu(cpu)
  39. tracing_reset(tr, cpu);
  40. ret = register_ftrace_graph(&trace_graph_return,
  41. &trace_graph_entry);
  42. if (ret)
  43. return ret;
  44. tracing_start_cmdline_record();
  45. return 0;
  46. }
/*
 * Tracer teardown: stop cmdline recording, then unhook the graph
 * callbacks registered by graph_trace_init().
 */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
  52. static inline int log10_cpu(int nb)
  53. {
  54. if (nb / 100)
  55. return 3;
  56. if (nb / 10)
  57. return 2;
  58. return 1;
  59. }
  60. static enum print_line_t
  61. print_graph_cpu(struct trace_seq *s, int cpu)
  62. {
  63. int i;
  64. int ret;
  65. int log10_this = log10_cpu(cpu);
  66. int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
  67. /*
  68. * Start with a space character - to make it stand out
  69. * to the right a bit when trace output is pasted into
  70. * email:
  71. */
  72. ret = trace_seq_printf(s, " ");
  73. /*
  74. * Tricky - we space the CPU field according to the max
  75. * number of online CPUs. On a 2-cpu system it would take
  76. * a maximum of 1 digit - on a 128 cpu system it would
  77. * take up to 3 digits:
  78. */
  79. for (i = 0; i < log10_all - log10_this; i++) {
  80. ret = trace_seq_printf(s, " ");
  81. if (!ret)
  82. return TRACE_TYPE_PARTIAL_LINE;
  83. }
  84. ret = trace_seq_printf(s, "%d) ", cpu);
  85. if (!ret)
  86. return TRACE_TYPE_PARTIAL_LINE;
  87. return TRACE_TYPE_HANDLED;
  88. }
  89. /* If the pid changed since the last trace, output this event */
  90. static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
  91. {
  92. char *comm, *prev_comm;
  93. pid_t prev_pid;
  94. int ret;
  95. if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
  96. return 1;
  97. prev_pid = last_pid[cpu];
  98. last_pid[cpu] = pid;
  99. comm = trace_find_cmdline(pid);
  100. prev_comm = trace_find_cmdline(prev_pid);
  101. /*
  102. * Context-switch trace line:
  103. ------------------------------------------
  104. | 1) migration/0--1 => sshd-1755
  105. ------------------------------------------
  106. */
  107. ret = trace_seq_printf(s,
  108. " ------------------------------------------\n");
  109. ret += trace_seq_printf(s, " | %d) %s-%d => %s-%d\n",
  110. cpu, prev_comm, prev_pid, comm, pid);
  111. ret += trace_seq_printf(s,
  112. " ------------------------------------------\n\n");
  113. return ret;
  114. }
  115. static bool
  116. trace_branch_is_leaf(struct trace_iterator *iter,
  117. struct ftrace_graph_ent_entry *curr)
  118. {
  119. struct ring_buffer_iter *ring_iter;
  120. struct ring_buffer_event *event;
  121. struct ftrace_graph_ret_entry *next;
  122. ring_iter = iter->buffer_iter[iter->cpu];
  123. if (!ring_iter)
  124. return false;
  125. event = ring_buffer_iter_peek(ring_iter, NULL);
  126. if (!event)
  127. return false;
  128. next = ring_buffer_event_data(event);
  129. if (next->ent.type != TRACE_GRAPH_RET)
  130. return false;
  131. if (curr->ent.pid != next->ent.pid ||
  132. curr->graph_ent.func != next->ret.func)
  133. return false;
  134. return true;
  135. }
  136. static inline int
  137. print_graph_duration(unsigned long long duration, struct trace_seq *s)
  138. {
  139. unsigned long nsecs_rem = do_div(duration, 1000);
  140. return trace_seq_printf(s, "%4llu.%3lu us | ", duration, nsecs_rem);
  141. }
/*
 * Signal an overhead of time execution to the output: "!" for slow
 * calls, "+" for moderately slow ones, blank padding otherwise.
 * @duration is in nanoseconds (print_graph_duration() divides the
 * same value by 1000 to get microseconds), so these thresholds are
 * 100 usecs and 10 usecs - the original "msecs" comments were wrong
 * by a factor of 1000.
 */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	/* Duration exceeded 100 usecs (100000 ns) */
	if (duration > 100000ULL)
		return trace_seq_printf(s, "! ");
	/* Duration exceeded 10 usecs (10000 ns) */
	if (duration > 10000ULL)
		return trace_seq_printf(s, "+ ");
	return trace_seq_printf(s, " ");
}
/* Case of a leaf function on its call entry */
/*
 * Print a leaf call as one "func();" line with its duration.
 * This consumes (ring_buffer_read, not peek) the matching
 * TRACE_GRAPH_RET event that trace_branch_is_leaf() found, so the
 * return event is never printed on its own.
 * Returns TRACE_TYPE_PARTIAL_LINE if the seq buffer fills up.
 */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
	struct ftrace_graph_ret_entry *ret_entry;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	/* Consume the paired return event from this cpu's buffer */
	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	/* Must not exceed 8 characters: 9999.999 us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function - indented by call depth */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "();\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
  198. static enum print_line_t
  199. print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
  200. struct trace_seq *s)
  201. {
  202. int i;
  203. int ret;
  204. struct ftrace_graph_ent *call = &entry->graph_ent;
  205. /* No overhead */
  206. if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
  207. ret = trace_seq_printf(s, " ");
  208. if (!ret)
  209. return TRACE_TYPE_PARTIAL_LINE;
  210. }
  211. /* No time */
  212. ret = trace_seq_printf(s, " | ");
  213. /* Function */
  214. for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
  215. ret = trace_seq_printf(s, " ");
  216. if (!ret)
  217. return TRACE_TYPE_PARTIAL_LINE;
  218. }
  219. ret = seq_print_ip_sym(s, call->func, 0);
  220. if (!ret)
  221. return TRACE_TYPE_PARTIAL_LINE;
  222. ret = trace_seq_printf(s, "() {\n");
  223. if (!ret)
  224. return TRACE_TYPE_PARTIAL_LINE;
  225. return TRACE_TYPE_HANDLED;
  226. }
  227. static enum print_line_t
  228. print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
  229. struct trace_iterator *iter, int cpu)
  230. {
  231. int ret;
  232. struct trace_entry *ent = iter->ent;
  233. /* Pid */
  234. if (!verif_pid(s, ent->pid, cpu))
  235. return TRACE_TYPE_PARTIAL_LINE;
  236. /* Cpu */
  237. if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
  238. ret = print_graph_cpu(s, cpu);
  239. if (!ret)
  240. return TRACE_TYPE_PARTIAL_LINE;
  241. }
  242. if (trace_branch_is_leaf(iter, field))
  243. return print_graph_entry_leaf(iter, field, s);
  244. else
  245. return print_graph_entry_nested(field, s);
  246. }
/*
 * Print a function-return event as a closing "}" line, preceded by
 * the pid-change banner, optional CPU column, overhead marker and
 * duration, and optionally followed by the overrun count.
 * Returns TRACE_TYPE_PARTIAL_LINE if the seq buffer fills up.
 */
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		struct trace_entry *ent, int cpu)
{
	int i;
	int ret;
	unsigned long long duration = trace->rettime - trace->calltime;

	/* Must not exceed 8 characters: xxxx.yyy us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

	/* Pid */
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace - indented by call depth */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
  294. enum print_line_t
  295. print_graph_function(struct trace_iterator *iter)
  296. {
  297. struct trace_seq *s = &iter->seq;
  298. struct trace_entry *entry = iter->ent;
  299. switch (entry->type) {
  300. case TRACE_GRAPH_ENT: {
  301. struct ftrace_graph_ent_entry *field;
  302. trace_assign_type(field, entry);
  303. return print_graph_entry(field, s, iter,
  304. iter->cpu);
  305. }
  306. case TRACE_GRAPH_RET: {
  307. struct ftrace_graph_ret_entry *field;
  308. trace_assign_type(field, entry);
  309. return print_graph_return(&field->ret, s, entry, iter->cpu);
  310. }
  311. default:
  312. return TRACE_TYPE_UNHANDLED;
  313. }
  314. }
/*
 * Tracer descriptor registered with the ftrace core; selected via
 * "function_graph" in the current_tracer debugfs file.
 */
static struct tracer graph_trace __read_mostly = {
	.name = "function_graph",
	.init = graph_trace_init,
	.reset = graph_trace_reset,
	.print_line = print_graph_function,
	.flags = &tracer_flags,
};
/* Register the tracer at boot (device_initcall level). */
static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}
device_initcall(init_graph_trace);