/* trace_hw_branches.c */
  1. /*
  2. * h/w branch tracer for x86 based on bts
  3. *
  4. * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com>
  5. *
  6. */
  7. #include <linux/module.h>
  8. #include <linux/fs.h>
  9. #include <linux/debugfs.h>
  10. #include <linux/ftrace.h>
  11. #include <linux/kallsyms.h>
  12. #include <asm/ds.h>
  13. #include "trace.h"
/* Size of the per-cpu BTS buffer: 8 KB. */
#define SIZEOF_BTS (1 << 13)

/* Per-cpu BTS tracer handle, obtained from ds_request_bts(). */
static DEFINE_PER_CPU(struct bts_tracer *, tracer);
/* Per-cpu raw buffer handed to the DS/BTS hardware. */
static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);

/*
 * NOTE(review): both macros evaluate smp_processor_id(); presumably they
 * are only used from IPI/non-preemptible context — confirm all callers
 * run with preemption disabled.
 */
#define this_tracer per_cpu(tracer, smp_processor_id())
#define this_buffer per_cpu(buffer, smp_processor_id())
  19. static void bts_trace_reset(struct trace_array *tr)
  20. {
  21. int cpu;
  22. tr->time_start = ftrace_now(tr->cpu);
  23. for_each_online_cpu(cpu)
  24. tracing_reset(tr, cpu);
  25. }
  26. static void bts_trace_start_cpu(void *arg)
  27. {
  28. if (this_tracer)
  29. ds_release_bts(this_tracer);
  30. this_tracer =
  31. ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
  32. /* ovfl = */ NULL, /* th = */ (size_t)-1,
  33. BTS_KERNEL);
  34. if (IS_ERR(this_tracer)) {
  35. this_tracer = NULL;
  36. return;
  37. }
  38. }
  39. static void bts_trace_start(struct trace_array *tr)
  40. {
  41. int cpu;
  42. bts_trace_reset(tr);
  43. for_each_cpu_mask(cpu, cpu_possible_map)
  44. smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
  45. }
  46. static void bts_trace_stop_cpu(void *arg)
  47. {
  48. if (this_tracer) {
  49. ds_release_bts(this_tracer);
  50. this_tracer = NULL;
  51. }
  52. }
  53. static void bts_trace_stop(struct trace_array *tr)
  54. {
  55. int cpu;
  56. for_each_cpu_mask(cpu, cpu_possible_map)
  57. smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
  58. }
  59. static int bts_trace_init(struct trace_array *tr)
  60. {
  61. bts_trace_reset(tr);
  62. bts_trace_start(tr);
  63. return 0;
  64. }
  65. static void bts_trace_print_header(struct seq_file *m)
  66. {
  67. seq_puts(m,
  68. "# CPU# FROM TO FUNCTION\n");
  69. seq_puts(m,
  70. "# | | | |\n");
  71. }
  72. static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
  73. {
  74. struct trace_entry *entry = iter->ent;
  75. struct trace_seq *seq = &iter->seq;
  76. struct hw_branch_entry *it;
  77. trace_assign_type(it, entry);
  78. if (entry->type == TRACE_HW_BRANCHES) {
  79. if (trace_seq_printf(seq, "%4d ", entry->cpu) &&
  80. trace_seq_printf(seq, "0x%016llx -> 0x%016llx ",
  81. it->from, it->to) &&
  82. (!it->from ||
  83. seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) &&
  84. trace_seq_printf(seq, "\n"))
  85. return TRACE_TYPE_HANDLED;
  86. return TRACE_TYPE_PARTIAL_LINE;;
  87. }
  88. return TRACE_TYPE_UNHANDLED;
  89. }
  90. void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
  91. {
  92. struct ring_buffer_event *event;
  93. struct hw_branch_entry *entry;
  94. unsigned long irq;
  95. event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq);
  96. if (!event)
  97. return;
  98. entry = ring_buffer_event_data(event);
  99. tracing_generic_entry_update(&entry->ent, 0, from);
  100. entry->ent.type = TRACE_HW_BRANCHES;
  101. entry->ent.cpu = smp_processor_id();
  102. entry->from = from;
  103. entry->to = to;
  104. ring_buffer_unlock_commit(tr->buffer, event, irq);
  105. }
  106. static void trace_bts_at(struct trace_array *tr,
  107. const struct bts_trace *trace, void *at)
  108. {
  109. struct bts_struct bts;
  110. int err = 0;
  111. WARN_ON_ONCE(!trace->read);
  112. if (!trace->read)
  113. return;
  114. err = trace->read(this_tracer, at, &bts);
  115. if (err < 0)
  116. return;
  117. switch (bts.qualifier) {
  118. case BTS_BRANCH:
  119. trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to);
  120. break;
  121. }
  122. }
/*
 * IPI callback: drain the BTS buffer of the current cpu into the
 * trace array passed via @arg.
 *
 * Tracing is suspended around the read so the hardware does not
 * append records while we walk the buffer, and resumed afterwards.
 */
static void trace_bts_cpu(void *arg)
{
	struct trace_array *tr = (struct trace_array *) arg;
	const struct bts_trace *trace;
	unsigned char *at;

	if (!this_tracer)
		return;

	ds_suspend_bts(this_tracer);
	trace = ds_read_bts(this_tracer);
	if (!trace)
		goto out;

	/*
	 * Two-pass walk of the buffer: top -> end, then begin -> top,
	 * stepping one record (ds.size) at a time.  Presumably top is
	 * the next write position of the circular buffer, making this
	 * an oldest-first traversal — confirm against the ds.h API.
	 */
	for (at = trace->ds.top; (void *)at < trace->ds.end;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);

	for (at = trace->ds.begin; (void *)at < trace->ds.top;
	     at += trace->ds.size)
		trace_bts_at(tr, trace, at);

out:
	ds_resume_bts(this_tracer);
}
  143. static void trace_bts_prepare(struct trace_iterator *iter)
  144. {
  145. int cpu;
  146. for_each_cpu_mask(cpu, cpu_possible_map)
  147. smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
  148. }
/*
 * The hw-branch tracer plugin definition.
 *
 * NOTE(review): .reset maps to bts_trace_stop — i.e. reset tears the
 * per-cpu tracers down rather than clearing buffers; confirm this is
 * the intended semantic for the tracer core.
 */
struct tracer bts_tracer __read_mostly =
{
	.name		= "hw-branch-tracer",
	.init		= bts_trace_init,
	.reset		= bts_trace_stop,
	.print_header	= bts_trace_print_header,
	.print_line	= bts_trace_print_line,
	.start		= bts_trace_start,
	.stop		= bts_trace_stop,
	.open		= trace_bts_prepare
};
  160. __init static int init_bts_trace(void)
  161. {
  162. return register_tracer(&bts_tracer);
  163. }
  164. device_initcall(init_bts_trace);