trace_boot.c 2.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130
  1. /*
  2. * ring buffer based initcalls tracer
  3. *
  4. * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
  5. *
  6. */
  7. #include <linux/init.h>
  8. #include <linux/debugfs.h>
  9. #include <linux/ftrace.h>
  10. #include <linux/kallsyms.h>
  11. #include "trace.h"
/* The trace_array whose ring buffer receives boot events; set by boot_trace_init(). */
static struct trace_array *boot_trace;
/* True once the pre-SMP initcalls have completed; gates recording in trace_boot(). */
static bool pre_initcalls_finished;
/*
 * Tells the boot tracer that the pre_smp_initcalls are finished,
 * so we are ready to record initcall events.
 * It doesn't enable sched events tracing however.
 * You have to call enable_boot_trace to do so.
 */
void start_boot_trace(void)
{
	pre_initcalls_finished = true;
}
/* Enable boot-event tracing. Currently an empty stub: nothing extra to switch on. */
void enable_boot_trace(void)
{
}
/* Disable boot-event tracing. Currently an empty stub: nothing extra to switch off. */
void disable_boot_trace(void)
{
}
/*
 * Tracer "reset" callback: stop recording boot events.
 * NOTE(review): this does not clear the ring buffer; the per-cpu buffers
 * are only reset in boot_trace_init().
 */
void reset_boot_trace(struct trace_array *tr)
{
	disable_boot_trace();
}
  33. static void boot_trace_init(struct trace_array *tr)
  34. {
  35. int cpu;
  36. boot_trace = tr;
  37. for_each_cpu_mask(cpu, cpu_possible_map)
  38. tracing_reset(tr, cpu);
  39. }
  40. static void boot_trace_ctrl_update(struct trace_array *tr)
  41. {
  42. if (tr->ctrl)
  43. enable_boot_trace();
  44. else
  45. disable_boot_trace();
  46. }
  47. static enum print_line_t initcall_print_line(struct trace_iterator *iter)
  48. {
  49. int ret;
  50. struct trace_entry *entry = iter->ent;
  51. struct trace_boot *field = (struct trace_boot *)entry;
  52. struct boot_trace *it = &field->initcall;
  53. struct trace_seq *s = &iter->seq;
  54. struct timespec calltime = ktime_to_timespec(it->calltime);
  55. struct timespec rettime = ktime_to_timespec(it->rettime);
  56. if (entry->type == TRACE_BOOT) {
  57. ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
  58. calltime.tv_sec,
  59. calltime.tv_nsec,
  60. it->func, it->caller);
  61. if (!ret)
  62. return TRACE_TYPE_PARTIAL_LINE;
  63. ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
  64. "returned %d after %lld msecs\n",
  65. rettime.tv_sec,
  66. rettime.tv_nsec,
  67. it->func, it->result, it->duration);
  68. if (!ret)
  69. return TRACE_TYPE_PARTIAL_LINE;
  70. return TRACE_TYPE_HANDLED;
  71. }
  72. return TRACE_TYPE_UNHANDLED;
  73. }
/* The boot tracer, registered with the tracing core under "initcall". */
struct tracer boot_tracer __read_mostly =
{
	.name		= "initcall",		/* shown in available_tracers */
	.init		= boot_trace_init,	/* clears per-cpu buffers */
	.reset		= reset_boot_trace,	/* stops boot-event recording */
	.ctrl_update	= boot_trace_ctrl_update,
	.print_line	= initcall_print_line,	/* formats TRACE_BOOT entries */
};
  82. void trace_boot(struct boot_trace *it, initcall_t fn)
  83. {
  84. struct ring_buffer_event *event;
  85. struct trace_boot *entry;
  86. struct trace_array_cpu *data;
  87. unsigned long irq_flags;
  88. struct trace_array *tr = boot_trace;
  89. if (!pre_initcalls_finished)
  90. return;
  91. /* Get its name now since this function could
  92. * disappear because it is in the .init section.
  93. */
  94. sprint_symbol(it->func, (unsigned long)fn);
  95. preempt_disable();
  96. data = tr->data[smp_processor_id()];
  97. event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
  98. &irq_flags);
  99. if (!event)
  100. goto out;
  101. entry = ring_buffer_event_data(event);
  102. tracing_generic_entry_update(&entry->ent, 0, 0);
  103. entry->ent.type = TRACE_BOOT;
  104. entry->initcall = *it;
  105. ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
  106. trace_wake_up();
  107. out:
  108. preempt_enable();
  109. }