trace_boot.c

/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/time.h>

#include "trace.h"
#include "trace_output.h"

static struct trace_array *boot_trace;
static bool pre_initcalls_finished;
/* Tells the boot tracer that the pre_smp_initcalls are finished,
 * so we are ready.
 * This does not enable sched-switch event recording, however;
 * enable_boot_trace() has to be called for that.
 */
void start_boot_trace(void)
{
        pre_initcalls_finished = true;
}
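
/* Start recording sched_switch events, once a trace array is registered
 * and the pre-SMP initcalls have completed.
 */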
void enable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_start_sched_switch_record();
}
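
/* Counterpart of enable_boot_trace(): stop recording sched_switch events. */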
void disable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_stop_sched_switch_record();
}
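
/* Called when the "initcall" tracer is selected: remember the trace array,
 * reset its per-cpu ring buffers and hook up sched_switch recording to it.
 */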
static int boot_trace_init(struct trace_array *tr)
{
        int cpu;
        boot_trace = tr;

        if (!tr)
                return 0;

        for_each_cpu(cpu, cpu_possible_mask)
                tracing_reset(tr, cpu);

        tracing_sched_switch_assign_trace(tr);
        return 0;
}
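
/* Print one TRACE_BOOT_CALL entry: the timestamp split into seconds and
 * nanoseconds with do_div(), then the initcall name and its caller.
 */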
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_call *field;
        struct boot_trace_call *call;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        call = &field->boot_call;
        ts = iter->ts;
        nsec_rem = do_div(ts, NSEC_PER_SEC);

        ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
                        (unsigned long)ts, nsec_rem, call->func, call->caller);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}
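
/* Print one TRACE_BOOT_RET entry: timestamp, initcall name, return value
 * and duration in milliseconds.
 */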
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_ret *field;
        struct boot_trace_ret *init_ret;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        init_ret = &field->boot_ret;
        ts = iter->ts;
        nsec_rem = do_div(ts, NSEC_PER_SEC);

        ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
                        "returned %d after %llu msecs\n",
                        (unsigned long) ts,
                        nsec_rem,
                        init_ret->func, init_ret->result, init_ret->duration);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}
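
/* Dispatch on the entry type; anything else is reported as unhandled so the
 * generic output code can deal with it.
 */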
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_BOOT_CALL:
                return initcall_call_print_line(iter);
        case TRACE_BOOT_RET:
                return initcall_ret_print_line(iter);
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}
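
/* The boot tracer itself: it reuses the generic per-cpu reset helper and
 * plugs in the initcall-specific line printer above.
 */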
struct tracer boot_tracer __read_mostly =
{
        .name       = "initcall",
        .init       = boot_trace_init,
        .reset      = tracing_reset_online_cpus,
        .print_line = initcall_print_line,
};
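
/* Record a TRACE_BOOT_CALL event for an initcall that is about to run.
 * The caller fills in *bt (notably bt->caller); only fn's symbol name is
 * resolved here, and preemption is disabled around the reserve/commit pair.
 */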
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
        struct ring_buffer_event *event;
        struct trace_boot_call *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        /* Get its name now since this function could
         * disappear because it is in the .init section.
         */
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->boot_call = *bt;
        trace_buffer_unlock_commit(tr, event, 0, 0);
out:
        preempt_enable();
}
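
/* Record a TRACE_BOOT_RET event once an initcall has returned; the result
 * and duration in *bt are filled in by the caller, only fn's symbol name is
 * resolved here.
 */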
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
        struct ring_buffer_event *event;
        struct trace_boot_ret *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->boot_ret = *bt;
        trace_buffer_unlock_commit(tr, event, 0, 0);
out:
        preempt_enable();
}