trace_functions_graph.c

/*
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

#define TRACE_GRAPH_INDENT		2

#define TRACE_GRAPH_PRINT_OVERRUN	0x1

static struct tracer_opt trace_opts[] = {
	/* Display overruns or not */
	{ TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val = 0, /* Don't display overruns by default */
	.opts = trace_opts
};

/* pid on the last trace processed */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
static int graph_trace_init(struct trace_array *tr)
{
	int cpu, ret;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
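/*
 * Note: trace_graph_entry() and trace_graph_return() are not defined in
 * this file; they are the per-event hooks declared in trace.h. Once
 * register_ftrace_graph() arms them, every traced function entry and
 * exit lands in the ring buffer and is later rendered by the
 * print_graph_* helpers below.
 */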
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
/* If the pid changed since the last trace on this CPU, output a separator */
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
{
	char *comm;

	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
		return 1;

	last_pid[cpu] = pid;
	comm = trace_find_cmdline(pid);

	return trace_seq_printf(s, "\nCPU[%03d]"
				   " ------------8<---------- thread %s-%d"
				   " ------------8<----------\n\n",
				   cpu, comm, pid);
}
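/*
 * Illustrative rendering of the separator above (the comm and pid are
 * made up), emitted whenever a CPU starts tracing a different task:
 *
 *   CPU[000] ------------8<---------- thread bash-2015 ------------8<----------
 */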
/*
 * Emit one entry event: the "CPU[nnn]" prefix, depth-based indentation,
 * then "func() {" with the symbol resolved by seq_print_ip_sym().
 */
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent *call, struct trace_seq *s,
		  struct trace_entry *ent, int cpu)
{
	int i;
	int ret;

	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "() {\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/*
 * Emit one return event: the matching closing brace plus the time spent
 * in the function (rettime - calltime), and optionally the overrun count.
 */
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
{
	int i;
	int ret;

	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "CPU[%03d] ", cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "} ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "%llu\n", trace->rettime - trace->calltime);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
				       trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return TRACE_TYPE_HANDLED;
}
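/*
 * Taken together, the two helpers above render nested calls roughly like
 * this sketch (function names and durations are made up; the duration is
 * rettime - calltime in ring-buffer time units):
 *
 *   CPU[000]   sys_read() {
 *   CPU[000]     fget_light() {
 *   CPU[000]     } 756
 *   CPU[000]   } 3210
 */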
/* Entry point from the trace iterator: dispatch on the ring-buffer event type */
enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
		return print_graph_entry(&field->graph_ent, s, entry,
					 iter->cpu);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter->cpu);
	}
	default:
		return TRACE_TYPE_UNHANDLED;
	}
}
static struct tracer graph_trace __read_mostly = {
	.name		= "function-graph",
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.flags		= &tracer_flags,
};

static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);
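/*
 * Usage sketch (not part of this file; assumes the conventional debugfs
 * mount point). With the kernel built with this tracer, it is selected
 * by the name registered above and read back through the tracing files:
 *
 *   # mount -t debugfs nodev /sys/kernel/debug
 *   # echo function-graph > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 *
 * The "overrun" option declared in trace_opts is toggled by name:
 *
 *   # echo overrun > /sys/kernel/debug/tracing/trace_options
 *   # echo nooverrun > /sys/kernel/debug/tracing/trace_options
 */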