trace_boot.c

/*
 * ring buffer based initcalls tracer
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 */
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>

#include "trace.h"

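/*
 * boot_trace points at the trace array handed to this tracer by the tracing
 * core; pre_initcalls_finished gates all recording until the pre-SMP
 * initcalls are done.
 */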
static struct trace_array *boot_trace;
static bool pre_initcalls_finished;

/*
 * Tells the boot tracer that the pre_smp_initcalls are finished, so we are
 * ready to record. It doesn't enable sched events tracing, however; you
 * have to call enable_boot_trace to do so.
 */
void start_boot_trace(void)
{
        pre_initcalls_finished = true;
}

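/* Start recording sched switch events, once the pre-SMP initcalls have finished. */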
void enable_boot_trace(void)
{
        if (pre_initcalls_finished)
                tracing_start_sched_switch_record();
}

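/* Stop recording sched switch events; a no-op until start_boot_trace() has run. */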
void disable_boot_trace(void)
{
        if (pre_initcalls_finished)
                tracing_stop_sched_switch_record();
}

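/* Reset the start timestamp and clear the buffers of every online cpu. */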
static void reset_boot_trace(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
}

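/*
 * Tracer .init callback: remember the trace array, clear the buffer of every
 * possible cpu and let the sched switch recorder log into the same array.
 */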
static void boot_trace_init(struct trace_array *tr)
{
        int cpu;
        boot_trace = tr;

        for_each_cpu_mask(cpu, cpu_possible_map)
                tracing_reset(tr, cpu);

        tracing_sched_switch_assign_trace(tr);
}

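/* Follow the tracer's ctrl flag: start or stop sched switch recording accordingly. */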
static void boot_trace_ctrl_update(struct trace_array *tr)
{
        if (tr->ctrl)
                enable_boot_trace();
        else
                disable_boot_trace();
}

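/*
 * Turn a TRACE_BOOT entry into two human readable lines: one for the moment
 * the initcall was entered and one with its return value and duration.
 */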
static enum print_line_t initcall_print_line(struct trace_iterator *iter)
{
        int ret;
        struct trace_entry *entry = iter->ent;
        struct trace_boot *field = (struct trace_boot *)entry;
        struct boot_trace *it = &field->initcall;
        struct trace_seq *s = &iter->seq;
        struct timespec calltime = ktime_to_timespec(it->calltime);
        struct timespec rettime = ktime_to_timespec(it->rettime);

        if (entry->type == TRACE_BOOT) {
                ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
                                       calltime.tv_sec,
                                       calltime.tv_nsec,
                                       it->func, it->caller);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
                                       "returned %d after %lld msecs\n",
                                       rettime.tv_sec,
                                       rettime.tv_nsec,
                                       it->func, it->result, it->duration);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;

                return TRACE_TYPE_HANDLED;
        }
        return TRACE_TYPE_UNHANDLED;
}

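/* Tracer callbacks, exposed to the tracing core under the name "initcall". */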
struct tracer boot_tracer __read_mostly =
{
        .name        = "initcall",
        .init        = boot_trace_init,
        .reset       = reset_boot_trace,
        .ctrl_update = boot_trace_ctrl_update,
        .print_line  = initcall_print_line,
};

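/*
 * Record one completed initcall into the ring buffer. The caller fills the
 * pid, timing, result and duration fields of @it; the symbol name of @fn is
 * resolved here, before its .init memory can be freed.
 */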
void trace_boot(struct boot_trace *it, initcall_t fn)
{
        struct ring_buffer_event *event;
        struct trace_boot *entry;
        struct trace_array_cpu *data;
        unsigned long irq_flags;
        struct trace_array *tr = boot_trace;

        if (!pre_initcalls_finished)
                return;

        /* Get its name now since this function could
         * disappear because it is in the .init section.
         */
        sprint_symbol(it->func, (unsigned long)fn);
        preempt_disable();
        data = tr->data[smp_processor_id()];

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);
        entry->ent.type = TRACE_BOOT;
        entry->initcall = *it;
        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

        trace_wake_up();
 out:
        preempt_enable();
}
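
/*
 * Illustrative only, not part of this file: a caller such as
 * do_one_initcall() in init/main.c is expected to fill a struct boot_trace
 * around the initcall and hand it to trace_boot(). A minimal sketch, with
 * field names taken from the usage above; the exact caller-side code lives
 * outside this file:
 *
 *	struct boot_trace it;
 *
 *	it.caller = task_pid_nr(current);
 *	it.calltime = ktime_get();
 *	it.result = fn();
 *	it.rettime = ktime_get();
 *	it.duration = ktime_to_ms(ktime_sub(it.rettime, it.calltime));
 *	trace_boot(&it, fn);
 */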