trace_boot.c

/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>

#include "trace.h"
#include "trace_output.h"
static struct trace_array *boot_trace;
static bool pre_initcalls_finished;
/* Tells the boot tracer that the pre_smp_initcalls are finished.
 * So we are ready.
 * It doesn't enable sched events tracing however;
 * you have to call enable_boot_trace to do so.
 */
void start_boot_trace(void)
{
        pre_initcalls_finished = true;
}
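
/*
 * Start recording sched_switch events into the boot trace, once the
 * pre-SMP initcalls are done and a trace array has been set up.
 */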
void enable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_start_sched_switch_record();
}
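
/* Stop recording sched_switch events into the boot trace. */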
void disable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_stop_sched_switch_record();
}
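
/*
 * Called when the "initcall" tracer is selected: remember the trace
 * array, clear every possible CPU's ring buffer and route sched_switch
 * events into this trace array.
 */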
static int boot_trace_init(struct trace_array *tr)
{
        int cpu;

        boot_trace = tr;

        if (!tr)
                return 0;

        for_each_cpu(cpu, cpu_possible_mask)
                tracing_reset(tr, cpu);

        tracing_sched_switch_assign_trace(tr);
        return 0;
}
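
/*
 * Format a TRACE_BOOT_CALL entry: split the buffer timestamp into
 * seconds and nanoseconds and print the "calling <func> @ <caller>" line.
 */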
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_call *field;
        struct boot_trace_call *call;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        call = &field->boot_call;
        ts = iter->ts;
        nsec_rem = do_div(ts, 1000000000);

        ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
                               (unsigned long)ts, nsec_rem, call->func,
                               call->caller);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}
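
/*
 * Format a TRACE_BOOT_RET entry: print the initcall's return value and
 * how many msecs it took, with the same second.nanosecond timestamp.
 */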
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_ret *field;
        struct boot_trace_ret *init_ret;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        init_ret = &field->boot_ret;
        ts = iter->ts;
        nsec_rem = do_div(ts, 1000000000);

        ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
                               "returned %d after %llu msecs\n",
                               (unsigned long)ts, nsec_rem,
                               init_ret->func, init_ret->result,
                               init_ret->duration);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}
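
/* Dispatch on the type of the entry pulled out of the ring buffer. */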
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_BOOT_CALL:
                return initcall_call_print_line(iter);
        case TRACE_BOOT_RET:
                return initcall_ret_print_line(iter);
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}
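
/* The tracer exposed to userspace under the name "initcall". */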
struct tracer boot_tracer __read_mostly =
{
        .name           = "initcall",
        .init           = boot_trace_init,
        .reset          = tracing_reset_online_cpus,
        .print_line     = initcall_print_line,
};
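
/*
 * Record a TRACE_BOOT_CALL event just before an initcall is run.
 * Nothing is written until the pre-SMP initcalls have finished and the
 * boot tracer has been selected.
 */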
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
        struct ring_buffer_event *event;
        struct trace_boot_call *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        /* Get its name now since this function could
         * disappear because it is in the .init section.
         */
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->boot_call = *bt;
        trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
        preempt_enable();
}
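
/*
 * Record a TRACE_BOOT_RET event after an initcall returns, carrying its
 * return value and duration.
 */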
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
        struct ring_buffer_event *event;
        struct trace_boot_ret *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->boot_ret = *bt;
        trace_buffer_unlock_commit(tr, event, 0, 0);
 out:
        preempt_enable();
}