trace_boot.c

/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>

#include "trace.h"

static struct trace_array *boot_trace;
static bool pre_initcalls_finished;
/*
 * Tell the boot tracer that the pre-SMP initcalls have finished, so we are
 * ready to record initcall events.
 *
 * This does not enable sched event tracing, however; call
 * enable_boot_trace() to do that.
 */
void start_boot_trace(void)
{
        pre_initcalls_finished = true;
}

/* Start recording sched switch events, once the pre-SMP initcalls are done. */
void enable_boot_trace(void)
{
        if (pre_initcalls_finished)
                tracing_start_sched_switch_record();
}

/* Stop recording sched switch events. */
void disable_boot_trace(void)
{
        if (pre_initcalls_finished)
                tracing_stop_sched_switch_record();
}
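
/*
 * Illustrative only: a rough sketch of how a boot-path caller might drive
 * this tracer around a single initcall.  The function below is hypothetical
 * and is not part of this file; it also assumes <linux/ktime.h> and
 * <linux/sched.h> for ktime_get() and task_pid_nr().
 */
#if 0
static int __init example_run_initcall(initcall_t fn)
{
        struct boot_trace_call call;
        struct boot_trace_ret ret;
        ktime_t calltime, rettime;
        int result;

        /* Record the "calling" event and start sched switch recording. */
        call.caller = task_pid_nr(current);
        trace_boot_call(&call, fn);
        enable_boot_trace();

        calltime = ktime_get();
        result = fn();
        rettime = ktime_get();

        /* Stop sched switch recording and record the "returned" event. */
        disable_boot_trace();
        ret.result = result;
        ret.duration = (unsigned long long)
                ktime_to_ns(ktime_sub(rettime, calltime)) / NSEC_PER_MSEC;
        trace_boot_ret(&ret, fn);

        return result;
}
#endif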

/*
 * Reset each possible CPU's buffer and point the sched switch recorder
 * at this trace array.
 */
static int boot_trace_init(struct trace_array *tr)
{
        int cpu;

        boot_trace = tr;

        for_each_cpu(cpu, cpu_possible_mask)
                tracing_reset(tr, cpu);

        tracing_sched_switch_assign_trace(tr);
        return 0;
}

/*
 * Print a "calling <func> @ <pid>" line, splitting the event timestamp
 * into seconds and nanoseconds.
 */
static enum print_line_t
initcall_call_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_call *field;
        struct boot_trace_call *call;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        call = &field->boot_call;
        ts = iter->ts;
        nsec_rem = do_div(ts, 1000000000);

        ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
                        (unsigned long)ts, nsec_rem, call->func, call->caller);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}

/*
 * Print an "initcall <func> returned <result> after <duration> msecs" line,
 * splitting the event timestamp into seconds and nanoseconds.
 */
static enum print_line_t
initcall_ret_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct trace_boot_ret *field;
        struct boot_trace_ret *init_ret;
        u64 ts;
        unsigned long nsec_rem;
        int ret;

        trace_assign_type(field, entry);
        init_ret = &field->boot_ret;
        ts = iter->ts;
        nsec_rem = do_div(ts, 1000000000);

        ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
                        "returned %d after %llu msecs\n",
                        (unsigned long) ts,
                        nsec_rem,
                        init_ret->func, init_ret->result, init_ret->duration);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        else
                return TRACE_TYPE_HANDLED;
}

/* Dispatch on the entry type; anything else is left to the default output code. */
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_BOOT_CALL:
                return initcall_call_print_line(iter);
        case TRACE_BOOT_RET:
                return initcall_ret_print_line(iter);
        default:
                return TRACE_TYPE_UNHANDLED;
        }
}
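
/*
 * Illustrative output only (the values below are made up for demonstration,
 * following the two format strings above):
 *
 *   [    0.357032456] calling pci_init+0x0/0x40 @ 1
 *   [    0.360245123] initcall pci_init+0x0/0x40 returned 0 after 3 msecs
 */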

struct tracer boot_tracer __read_mostly =
{
        .name           = "initcall",
        .init           = boot_trace_init,
        .reset          = tracing_reset_online_cpus,
        .print_line     = initcall_print_line,
};
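
/*
 * Illustrative only: boot_tracer is not static, so its registration
 * presumably lives elsewhere in the tracing code.  A minimal sketch of how
 * a tracer is typically hooked into the ftrace core with register_tracer()
 * might look like this (the init function name and the initcall level are
 * assumptions for demonstration):
 */
#if 0
static int __init example_register_boot_tracer(void)
{
        return register_tracer(&boot_tracer);
}
device_initcall(example_register_boot_tracer);
#endif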

void trace_boot_call(struct boot_trace_call *bt, initcall_t fn)
{
        struct ring_buffer_event *event;
        struct trace_boot_call *entry;
        unsigned long irq_flags;
        struct trace_array *tr = boot_trace;

        if (!pre_initcalls_finished)
                return;

        /* Get its name now since this function could
         * disappear because it is in the .init section.
         */
        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_BOOT_CALL;
        entry->boot_call = *bt;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();

 out:
        preempt_enable();
}

void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn)
{
        struct ring_buffer_event *event;
        struct trace_boot_ret *entry;
        unsigned long irq_flags;
        struct trace_array *tr = boot_trace;

        if (!pre_initcalls_finished)
                return;

        sprint_symbol(bt->func, (unsigned long)fn);
        preempt_disable();

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_BOOT_RET;
        entry->boot_ret = *bt;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();

 out:
        preempt_enable();
}