/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>
#include "trace.h"

#ifdef CONFIG_BRANCH_TRACER

static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);
static struct trace_array *branch_tracer;
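
/*
 * Record one branch-prediction hit or miss in the trace ring buffer.
 * Runs with interrupts disabled; the per-cpu "disabled" counter keeps
 * us from recursing if the ring buffer code itself hits an annotated
 * branch.
 */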
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
        struct trace_array *tr = branch_tracer;
        struct ring_buffer_event *event;
        struct trace_branch *entry;
        unsigned long flags, irq_flags;
        int cpu, pc;
        const char *p;

        /*
         * I would love to save just the ftrace_likely_data pointer, but
         * this code can also be used by modules. Ugly things can happen
         * if the module is unloaded, and then we go and read the
         * pointer. This is slower, but much safer.
         */

        if (unlikely(!tr))
                return;

        raw_local_irq_save(flags);
        cpu = raw_smp_processor_id();
        if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
                goto out;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
                                         &irq_flags);
        if (!event)
                goto out;

        pc = preempt_count();
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, flags, pc);
        entry->ent.type = TRACE_BRANCH;

        /* Strip off the path, only save the file */
        p = f->file + strlen(f->file);
        while (p >= f->file && *p != '/')
                p--;
        p++;

        strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
        strncpy(entry->file, p, TRACE_FILE_SIZE);
        entry->func[TRACE_FUNC_SIZE] = 0;
        entry->file[TRACE_FILE_SIZE] = 0;
        entry->line = f->line;
        entry->correct = val == expect;

        ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out:
        atomic_dec(&tr->data[cpu]->disabled);
        raw_local_irq_restore(flags);
}
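
/*
 * Fast-path wrapper: every annotated branch funnels through here via
 * ftrace_likely_update() below, so the disabled case is kept to a
 * single load and compare.
 */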
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
        if (!branch_tracing_enabled)
                return;

        probe_likely_condition(f, val, expect);
}
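
/*
 * branch_tracing_enabled is a reference count: enable/disable calls
 * may nest, and events are traced for as long as the count is
 * non-zero. branch_tracer must be published before the count is
 * raised, hence the smp_wmb() below.
 */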
int enable_branch_tracing(struct trace_array *tr)
{
        int ret = 0;

        mutex_lock(&branch_tracing_mutex);
        branch_tracer = tr;
        /*
         * Must be seen before enabling. The reader is the plain
         * enabled check itself, so no matching rmb() is needed.
         */
        smp_wmb();
        branch_tracing_enabled++;
        mutex_unlock(&branch_tracing_mutex);

        return ret;
}

void disable_branch_tracing(void)
{
        mutex_lock(&branch_tracing_mutex);

        if (!branch_tracing_enabled)
                goto out_unlock;

        branch_tracing_enabled--;

 out_unlock:
        mutex_unlock(&branch_tracing_mutex);
}

static void start_branch_trace(struct trace_array *tr)
{
        enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
        disable_branch_tracing();
}

static int branch_trace_init(struct trace_array *tr)
{
        int cpu;

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);

        start_branch_trace(tr);
        return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
        stop_branch_trace(tr);
}
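
/*
 * The tracer registered with the ftrace core. Once registered it can
 * be selected from userspace with, for example (the exact path depends
 * on where debugfs is mounted):
 *
 *      echo branch > /sys/kernel/debug/tracing/current_tracer
 */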
struct tracer branch_trace __read_mostly =
{
        .name           = "branch",
        .init           = branch_trace_init,
        .reset          = branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_branch,
#endif
};

__init static int init_branch_trace(void)
{
        return register_tracer(&branch_trace);
}

device_initcall(init_branch_trace);

#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
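
/*
 * Annotated branches reach ftrace_likely_update() through the
 * branch-profiling variants of likely()/unlikely(). As a rough sketch
 * (see include/linux/compiler.h for the real macros), likely(x)
 * expands to something like:
 *
 *      ({
 *              int ______r;
 *              static struct ftrace_branch_data ______f
 *                      __attribute__((section("_ftrace_likely"))) = {
 *                      .func   = __func__,
 *                      .file   = __FILE__,
 *                      .line   = __LINE__,
 *              };
 *              ______r = !!(x);
 *              ftrace_likely_update(&______f, ______r, 1);
 *              ______r;
 *      })
 *
 * so each annotation gets its own static counter record, placed in a
 * section that the seq_file iterator below can walk.
 */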
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
        /*
         * I would love to have a trace point here instead, but the
         * trace point code is so inundated with unlikely and likely
         * conditions that the recursive nightmare that exists is too
         * much to try to get working. At least for now.
         */
        trace_likely_condition(f, val, expect);

        /* FIXME: Make this atomic! */
        if (val == expect)
                f->correct++;
        else
                f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);

struct ftrace_pointer {
        void *start;
        void *stop;
};
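
/*
 * seq_file iterator over one profiling section. SEQ_START_TOKEN (i.e.
 * (void *)1) stands in for the header row; every real position is a
 * pointer into the ftrace_branch_data array bounded by start and stop.
 */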
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_pointer *f = m->private;
        struct ftrace_branch_data *p = v;

        (*pos)++;

        if (v == SEQ_START_TOKEN)
                return f->start;

        ++p;

        if ((void *)p >= (void *)f->stop)
                return NULL;

        return p;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        void *t = SEQ_START_TOKEN;
        loff_t l = 0;

        for (; t && l < *pos; t = t_next(m, t, &l))
                ;

        return t;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_branch_data *p = v;
        const char *f;
        unsigned long percent;

        if (v == SEQ_START_TOKEN) {
                seq_printf(m, " correct incorrect  %% "
                              "       Function                "
                              "  File              Line\n"
                              " ------- ---------  - "
                              "       --------                "
                              "  ----              ----\n");
                return 0;
        }

        /* Only print the file, not the path */
        f = p->file + strlen(p->file);
        while (f >= p->file && *f != '/')
                f--;
        f++;

        /* percentage of annotations that predicted wrong */
        if (p->correct) {
                percent = p->incorrect * 100;
                percent /= p->correct + p->incorrect;
        } else
                percent = p->incorrect ? 100 : 0;

        seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
        seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
        return 0;
}
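
/*
 * Example of the resulting output (values and symbols purely
 * illustrative):
 *
 *       correct incorrect  %        Function              File    Line
 *       ------- ---------  -        --------              ----    ----
 *        215372        52   0       update_curr           sched.c  384
 *            13      7896  99       pick_next_task_fair   sched.c 1546
 */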

static struct seq_operations tracing_likely_seq_ops = {
        .start          = t_start,
        .next           = t_next,
        .stop           = t_stop,
        .show           = t_show,
};

static int tracing_likely_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = seq_open(file, &tracing_likely_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = (void *)inode->i_private;
        }

        return ret;
}

static struct file_operations tracing_likely_fops = {
        .open           = tracing_likely_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
};

extern unsigned long __start_likely_profile[];
extern unsigned long __stop_likely_profile[];
extern unsigned long __start_unlikely_profile[];
extern unsigned long __stop_unlikely_profile[];
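
/*
 * These symbols bound the sections that collect the per-annotation
 * ftrace_branch_data records; they are emitted by the kernel linker
 * script (include/asm-generic/vmlinux.lds.h).
 */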
static struct ftrace_pointer ftrace_likely_pos = {
        .start                  = __start_likely_profile,
        .stop                   = __stop_likely_profile,
};

static struct ftrace_pointer ftrace_unlikely_pos = {
        .start                  = __start_unlikely_profile,
        .stop                   = __stop_unlikely_profile,
};

static __init int ftrace_branch_init(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("profile_likely", 0444, d_tracer,
                                    &ftrace_likely_pos,
                                    &tracing_likely_fops);
        if (!entry)
                pr_warning("Could not create debugfs 'profile_likely' entry\n");

        entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
                                    &ftrace_unlikely_pos,
                                    &tracing_likely_fops);
        if (!entry)
                pr_warning("Could not create debugfs"
                           " 'profile_unlikely' entry\n");

        return 0;
}

device_initcall(ftrace_branch_init);
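
/*
 * With debugfs mounted (the mount point may vary), the collected
 * statistics can then be read with e.g.:
 *
 *      cat /sys/kernel/debug/tracing/profile_likely
 *      cat /sys/kernel/debug/tracing/profile_unlikely
 */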