trace_branch.c

/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
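/*
 * This file implements two consumers of the branch annotation hooks:
 * the "branch" tracer, which records every annotated branch into the
 * trace ring buffer while it is enabled, and the profiler, built
 * whenever this file is compiled, which keeps per-call-site
 * correct/incorrect counters that are exported through debugfs.
 */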
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>

#include "trace.h"

#ifdef CONFIG_BRANCH_TRACER

static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);
static struct trace_array *branch_tracer;
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags, irq_flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;

	pc = preempt_count();
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_BRANCH;

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(flags);
}
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}
int enable_branch_tracing(struct trace_array *tr)
{
	int ret = 0;

	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * Must be seen before enabling. The reader is a condition
	 * where we do not need a matching rmb()
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return ret;
}

void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}
static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}

static void branch_trace_init(struct trace_array *tr)
{
	int cpu;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

	start_branch_trace(tr);
}

static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}
struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif
};
__init static int init_branch_trace(void)
{
	return register_tracer(&branch_trace);
}

device_initcall(init_branch_trace);
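/*
 * Usage sketch (paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo branch > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/trace
 *
 * While the tracer is active, every annotated branch that fires is
 * logged as a TRACE_BRANCH entry in the ring buffer.
 */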
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
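/*
 * How call sites reach ftrace_likely_update(): when branch profiling
 * is configured, compiler.h redefines likely()/unlikely() so that each
 * annotated condition drops a static struct ftrace_branch_data into a
 * dedicated linker section and reports the branch outcome. A minimal
 * sketch of that redefinition (names and section details are
 * illustrative, not the exact kernel macro):
 *
 *	#define unlikely(cond) ({					\
 *		static struct ftrace_branch_data			\
 *			__attribute__((section("_ftrace_unlikely")))	\
 *			______f = {					\
 *				.func = __func__,			\
 *				.file = __FILE__,			\
 *				.line = __LINE__,			\
 *			};						\
 *		int ______r = !!(cond);					\
 *		ftrace_likely_update(&______f, ______r, 0);		\
 *		______r;						\
 *	})
 */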
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);
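/*
 * The per-site records accumulate in dedicated linker sections; the
 * kernel linker script brackets them with the __start_..._profile and
 * __stop_..._profile symbols declared further down, so each section
 * can be walked as a plain array of struct ftrace_branch_data. The
 * seq_file code below iterates over one such [start, stop) range per
 * debugfs file.
 */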
struct ftrace_pointer {
	void		*start;
	void		*stop;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_pointer *f = m->private;
	struct ftrace_branch_data *p = v;

	(*pos)++;

	/* (void *)1 is the header sentinel handed out by t_start() */
	if (v == (void *)1)
		return f->start;

	++p;

	if ((void *)p >= (void *)f->stop)
		return NULL;

	return p;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = (void *)1;
	loff_t l = 0;

	/* advance to the record matching *pos; slot 0 is the header */
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	unsigned long percent;

	if (v == (void *)1) {
		seq_printf(m, " correct incorrect  %%  "
			      "      Function                "
			      "  File              Line\n"
			      " ------- ---------  -  "
			      "      --------                "
			      "  ----              ----\n");
		return 0;
	}

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : 0;

	seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}
static struct seq_operations tracing_likely_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
static int tracing_likely_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &tracing_likely_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = (void *)inode->i_private;
	}

	return ret;
}
static struct file_operations tracing_likely_fops = {
	.open		= tracing_likely_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
};

extern unsigned long __start_likely_profile[];
extern unsigned long __stop_likely_profile[];
extern unsigned long __start_unlikely_profile[];
extern unsigned long __stop_unlikely_profile[];

static struct ftrace_pointer ftrace_likely_pos = {
	.start		= __start_likely_profile,
	.stop		= __stop_likely_profile,
};

static struct ftrace_pointer ftrace_unlikely_pos = {
	.start		= __start_unlikely_profile,
	.stop		= __stop_unlikely_profile,
};
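/*
 * Usage sketch (path assumes debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/tracing/profile_likely
 *
 * Each row reports the correct/incorrect hit counts, the miss
 * percentage computed in t_show(), and the function, file, and line
 * of one annotated branch.
 */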
static __init int ftrace_branch_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("profile_likely", 0444, d_tracer,
				    &ftrace_likely_pos,
				    &tracing_likely_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'profile_likely' entry\n");

	entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
				    &ftrace_unlikely_pos,
				    &tracing_likely_fops);
	if (!entry)
		pr_warning("Could not create debugfs"
			   " 'profile_unlikely' entry\n");

	return 0;
}

device_initcall(ftrace_branch_init);