trace_boot.c

/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */

#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/time.h>

#include "trace.h"
#include "trace_output.h"
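
/*
 * boot_trace points at the trace_array handed to us by the tracing core
 * when the "initcall" tracer is selected; pre_initcalls_finished gates
 * event recording until start_boot_trace() has been called.
 */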
static struct trace_array *boot_trace;
static bool pre_initcalls_finished;

/* Tells the boot tracer that the pre_smp_initcalls are finished.
 * So we are ready.
 * It doesn't enable sched events tracing however.
 * You have to call enable_boot_trace to do so.
 */
void start_boot_trace(void)
{
        pre_initcalls_finished = true;
}
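
/*
 * enable_boot_trace()/disable_boot_trace() switch sched_switch recording
 * on and off, but only once the tracer is registered and the pre-SMP
 * initcalls have finished.
 */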
void enable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_start_sched_switch_record();
}

void disable_boot_trace(void)
{
        if (boot_trace && pre_initcalls_finished)
                tracing_stop_sched_switch_record();
}
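
/*
 * Tracer init callback: remember the trace_array, clear the per-cpu
 * ring buffers and route sched_switch events into this trace_array.
 */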
static int boot_trace_init(struct trace_array *tr)
{
        boot_trace = tr;

        if (!tr)
                return 0;

        tracing_reset_online_cpus(tr);

        tracing_sched_switch_assign_trace(tr);
        return 0;
}
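
/*
 * Format a TRACE_BOOT_CALL entry as "[sec.nsec] calling <func> @ <caller>".
 * do_div() splits the event timestamp into seconds and the nanosecond
 * remainder.
 */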
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_call *field;
        struct boot_trace_call *call;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        call = &field->boot_call;
        ts = iter->ts;
        nsec_rem = do_div(ts, NSEC_PER_SEC);

        ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
                        (unsigned long)ts, nsec_rem, call->func, call->caller);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}
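
/*
 * Format a TRACE_BOOT_RET entry: the initcall's name, its return value
 * and its duration, using the same timestamp split as above.
 */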
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_ret *field;
        struct boot_trace_ret *init_ret;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        init_ret = &field->boot_ret;
        ts = iter->ts;
        nsec_rem = do_div(ts, NSEC_PER_SEC);

        ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
                        "returned %d after %llu msecs\n",
                        (unsigned long) ts,
                        nsec_rem,
                        init_ret->func, init_ret->result, init_ret->duration);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}
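
/*
 * Dispatch on the entry type; anything else is left to the generic
 * output code.
 */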
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_BOOT_CALL:
                return initcall_call_print_line(iter);
        case TRACE_BOOT_RET:
                return initcall_ret_print_line(iter);
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}
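
/*
 * The "initcall" tracer: plugs the callbacks above into the generic
 * tracer framework.
 */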
struct tracer boot_tracer __read_mostly =
{
        .name           = "initcall",
        .init           = boot_trace_init,
        .reset          = tracing_reset_online_cpus,
        .print_line     = initcall_print_line,
};
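
/*
 * Record a TRACE_BOOT_CALL event just before an initcall runs: resolve
 * the symbol name up front (see the comment below), reserve a ring
 * buffer slot and copy the boot_trace_call payload, unless the event
 * filter discards it.
 */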
void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
        struct ftrace_event_call *call = &event_boot_call;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_boot_call *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        /* Get its name now since this function could
         * disappear because it is in the .init section.
         */
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->boot_call = *bt;
        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, 0);
out:
        preempt_enable();
}
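
/*
 * Record a TRACE_BOOT_RET event once the initcall has returned, mirroring
 * trace_boot_call(): resolve the symbol name, reserve a ring buffer slot
 * and copy the boot_trace_ret payload unless the event filter discards it.
 */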
void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
        struct ftrace_event_call *call = &event_boot_ret;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct trace_boot_ret *entry;
        struct trace_array *tr = boot_trace;

        if (!tr || !pre_initcalls_finished)
                return;

        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        buffer = tr->buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET,
                                          sizeof(*entry), 0, 0);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->boot_ret = *bt;
        if (!filter_check_discard(call, entry, buffer, event))
                trace_buffer_unlock_commit(buffer, event, 0, 0);
out:
        preempt_enable();
}