  1. /*
  2. * unlikely profiler
  3. *
  4. * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  5. */
  6. #include <linux/kallsyms.h>
  7. #include <linux/seq_file.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/irqflags.h>
  10. #include <linux/debugfs.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/module.h>
  13. #include <linux/ftrace.h>
  14. #include <linux/hash.h>
  15. #include <linux/fs.h>
  16. #include <asm/local.h>
  17. #include "trace.h"
#ifdef CONFIG_BRANCH_TRACER

/*
 * Non-zero while the "branch" tracer is active.  Read locklessly on the
 * tracing hot path (see trace_likely_condition()); only modified under
 * branch_tracing_mutex.
 */
static int branch_tracing_enabled __read_mostly;
/* Serializes enable_branch_tracing()/disable_branch_tracing(). */
static DEFINE_MUTEX(branch_tracing_mutex);
/* Trace array that branch events are recorded into; NULL until enabled. */
static struct trace_array *branch_tracer;
  22. static void
  23. probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
  24. {
  25. struct trace_array *tr = branch_tracer;
  26. struct ring_buffer_event *event;
  27. struct trace_branch *entry;
  28. unsigned long flags, irq_flags;
  29. int cpu, pc;
  30. const char *p;
  31. /*
  32. * I would love to save just the ftrace_likely_data pointer, but
  33. * this code can also be used by modules. Ugly things can happen
  34. * if the module is unloaded, and then we go and read the
  35. * pointer. This is slower, but much safer.
  36. */
  37. if (unlikely(!tr))
  38. return;
  39. local_irq_save(flags);
  40. cpu = raw_smp_processor_id();
  41. if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
  42. goto out;
  43. event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
  44. &irq_flags);
  45. if (!event)
  46. goto out;
  47. pc = preempt_count();
  48. entry = ring_buffer_event_data(event);
  49. tracing_generic_entry_update(&entry->ent, flags, pc);
  50. entry->ent.type = TRACE_BRANCH;
  51. /* Strip off the path, only save the file */
  52. p = f->file + strlen(f->file);
  53. while (p >= f->file && *p != '/')
  54. p--;
  55. p++;
  56. strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
  57. strncpy(entry->file, p, TRACE_FILE_SIZE);
  58. entry->func[TRACE_FUNC_SIZE] = 0;
  59. entry->file[TRACE_FILE_SIZE] = 0;
  60. entry->line = f->line;
  61. entry->correct = val == expect;
  62. ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
  63. out:
  64. atomic_dec(&tr->data[cpu]->disabled);
  65. local_irq_restore(flags);
  66. }
  67. static inline
  68. void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
  69. {
  70. if (!branch_tracing_enabled)
  71. return;
  72. probe_likely_condition(f, val, expect);
  73. }
  74. int enable_branch_tracing(struct trace_array *tr)
  75. {
  76. int ret = 0;
  77. mutex_lock(&branch_tracing_mutex);
  78. branch_tracer = tr;
  79. /*
  80. * Must be seen before enabling. The reader is a condition
  81. * where we do not need a matching rmb()
  82. */
  83. smp_wmb();
  84. branch_tracing_enabled++;
  85. mutex_unlock(&branch_tracing_mutex);
  86. return ret;
  87. }
  88. void disable_branch_tracing(void)
  89. {
  90. mutex_lock(&branch_tracing_mutex);
  91. if (!branch_tracing_enabled)
  92. goto out_unlock;
  93. branch_tracing_enabled--;
  94. out_unlock:
  95. mutex_unlock(&branch_tracing_mutex);
  96. }
  97. static void start_branch_trace(struct trace_array *tr)
  98. {
  99. enable_branch_tracing(tr);
  100. }
  101. static void stop_branch_trace(struct trace_array *tr)
  102. {
  103. disable_branch_tracing();
  104. }
  105. static int branch_trace_init(struct trace_array *tr)
  106. {
  107. int cpu;
  108. for_each_online_cpu(cpu)
  109. tracing_reset(tr, cpu);
  110. start_branch_trace(tr);
  111. return 0;
  112. }
  113. static void branch_trace_reset(struct trace_array *tr)
  114. {
  115. stop_branch_trace(tr);
  116. }
/* The "branch" tracer: logs every annotated likely()/unlikely() outcome. */
struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif
};
  126. __init static int init_branch_trace(void)
  127. {
  128. return register_tracer(&branch_trace);
  129. }
  130. device_initcall(init_branch_trace);
#else
/* Branch tracer compiled out: annotated-branch hits are not traced. */
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
/*
 * ftrace_likely_update - profiler hook called for every annotated branch
 * @f:      per-site branch data; correct/incorrect counters live here
 * @val:    actual truth value of the condition
 * @expect: value the annotation predicted
 *
 * Exported so annotated branches in modules can call it too.
 */
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	/* Racy cross-cpu increment: a deliberate trade of accuracy for speed. */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);
/*
 * One linker-section range of ftrace_branch_data records, iterated by
 * the seq_file operations below.
 */
struct ftrace_pointer {
	void	*start;		/* first record in the section */
	void	*stop;		/* one past the last record */
	int	hit;		/* non-zero: print "miss/hit" header, else "correct/incorrect" */
};
  158. static void *
  159. t_next(struct seq_file *m, void *v, loff_t *pos)
  160. {
  161. const struct ftrace_pointer *f = m->private;
  162. struct ftrace_branch_data *p = v;
  163. (*pos)++;
  164. if (v == (void *)1)
  165. return f->start;
  166. ++p;
  167. if ((void *)p >= (void *)f->stop)
  168. return NULL;
  169. return p;
  170. }
  171. static void *t_start(struct seq_file *m, loff_t *pos)
  172. {
  173. void *t = (void *)1;
  174. loff_t l = 0;
  175. for (; t && l < *pos; t = t_next(m, t, &l))
  176. ;
  177. return t;
  178. }
/* Nothing to release: iteration takes no locks and allocates nothing. */
static void t_stop(struct seq_file *m, void *p)
{
}
/*
 * t_show - print one row of the branch-profile file, or the column
 * header when handed the (void *)1 sentinel from t_start()/t_next().
 */
static int t_show(struct seq_file *m, void *v)
{
	const struct ftrace_pointer *fp = m->private;
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	if (v == (void *)1) {
		/* "all branches" files count miss/hit; annotated ones correct/incorrect */
		if (fp->hit)
			seq_printf(m, " miss hit %% ");
		else
			seq_printf(m, " correct incorrect %% ");
		seq_printf(m, " Function "
			" File Line\n"
			" ------- --------- - "
			" -------- "
			" ---- ----\n");
		return 0;
	}

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	/*
	 * NOTE(review): when there is no '/', f ends up at p->file - 1
	 * before the loop exits — below-start pointer arithmetic, same
	 * pattern as probe_likely_condition(); harmless in practice but
	 * technically undefined behavior.
	 */
	while (f >= p->file && *f != '/')
		f--;
	f++;

	/*
	 * The miss is overlayed on correct, and hit on incorrect.
	 */
	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		/* never predicted right: 100% wrong if ever hit, else unknown */
		percent = p->incorrect ? 100 : -1;

	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
	if (percent < 0)
		seq_printf(m, " X ");	/* "X": no data to compute a percentage */
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}
/* seq_file iteration over one branch-profile linker section. */
static struct seq_operations tracing_likely_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
  227. static int tracing_branch_open(struct inode *inode, struct file *file)
  228. {
  229. int ret;
  230. ret = seq_open(file, &tracing_likely_seq_ops);
  231. if (!ret) {
  232. struct seq_file *m = file->private_data;
  233. m->private = (void *)inode->i_private;
  234. }
  235. return ret;
  236. }
  237. static const struct file_operations tracing_branch_fops = {
  238. .open = tracing_branch_open,
  239. .read = seq_read,
  240. .llseek = seq_lseek,
  241. };
#ifdef CONFIG_PROFILE_ALL_BRANCHES
/* Linker-provided bounds of the all-branches profile section. */
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

/* Descriptor for /sys/kernel/debug/tracing/profile_branch. */
static const struct ftrace_pointer ftrace_branch_pos = {
	.start			= __start_branch_profile,
	.stop			= __stop_branch_profile,
	.hit			= 1,	/* header reads "miss/hit" */
};
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
/* Linker-provided bounds of the annotated-branch profile section. */
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

/* Descriptor for /sys/kernel/debug/tracing/profile_annotated_branch. */
static const struct ftrace_pointer ftrace_annotated_branch_pos = {
	.start			= __start_annotated_branch_profile,
	.stop			= __stop_annotated_branch_profile,
};
  257. static __init int ftrace_branch_init(void)
  258. {
  259. struct dentry *d_tracer;
  260. struct dentry *entry;
  261. d_tracer = tracing_init_dentry();
  262. entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
  263. (void *)&ftrace_annotated_branch_pos,
  264. &tracing_branch_fops);
  265. if (!entry)
  266. pr_warning("Could not create debugfs "
  267. "'profile_annotatet_branch' entry\n");
  268. #ifdef CONFIG_PROFILE_ALL_BRANCHES
  269. entry = debugfs_create_file("profile_branch", 0444, d_tracer,
  270. (void *)&ftrace_branch_pos,
  271. &tracing_branch_fops);
  272. if (!entry)
  273. pr_warning("Could not create debugfs"
  274. " 'profile_branch' entry\n");
  275. #endif
  276. return 0;
  277. }
  278. device_initcall(ftrace_branch_init);