  1. /*
  2. * unlikely profiler
  3. *
  4. * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  5. */
  6. #include <linux/kallsyms.h>
  7. #include <linux/seq_file.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/irqflags.h>
  10. #include <linux/debugfs.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/module.h>
  13. #include <linux/ftrace.h>
  14. #include <linux/hash.h>
  15. #include <linux/fs.h>
  16. #include <asm/local.h>
  17. #include "trace.h"
  18. #include "trace_output.h"
  19. #ifdef CONFIG_BRANCH_TRACER
/* Non-zero while the branch tracer is active; read locklessly by the probe. */
static int branch_tracing_enabled __read_mostly;
/* Serializes enable/disable transitions of the tracer. */
static DEFINE_MUTEX(branch_tracing_mutex);
/* Trace array events are written into; published before enabling (see smp_wmb()). */
static struct trace_array *branch_tracer;
/*
 * probe_likely_condition - record one annotated-branch event in the ring buffer
 * @f:      static per-annotation-site descriptor (function, file, line)
 * @val:    the value the condition actually evaluated to
 * @expect: the value the likely()/unlikely() annotation predicted
 *
 * Copies the site information into a TRACE_BRANCH entry rather than
 * saving a pointer to @f, since @f may live in a module that can be
 * unloaded (see comment below).
 */
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags, irq_flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */
	if (unlikely(!tr))
		return;

	/*
	 * Disable interrupts and bump the per-cpu disabled counter so a
	 * recursive entry (e.g. a branch annotation inside the tracing
	 * path itself) bails out instead of recursing forever.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;

	pc = preempt_count();
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_BRANCH;

	/* Strip off the path, only save the file */
	/* Walk back to the last '/', then step forward to the basename. */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	/*
	 * strncpy() does not guarantee NUL termination, hence the
	 * explicit terminators written just below.
	 */
	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(flags);
}
  68. static inline
  69. void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
  70. {
  71. if (!branch_tracing_enabled)
  72. return;
  73. probe_likely_condition(f, val, expect);
  74. }
  75. int enable_branch_tracing(struct trace_array *tr)
  76. {
  77. int ret = 0;
  78. mutex_lock(&branch_tracing_mutex);
  79. branch_tracer = tr;
  80. /*
  81. * Must be seen before enabling. The reader is a condition
  82. * where we do not need a matching rmb()
  83. */
  84. smp_wmb();
  85. branch_tracing_enabled++;
  86. mutex_unlock(&branch_tracing_mutex);
  87. return ret;
  88. }
  89. void disable_branch_tracing(void)
  90. {
  91. mutex_lock(&branch_tracing_mutex);
  92. if (!branch_tracing_enabled)
  93. goto out_unlock;
  94. branch_tracing_enabled--;
  95. out_unlock:
  96. mutex_unlock(&branch_tracing_mutex);
  97. }
  98. static void start_branch_trace(struct trace_array *tr)
  99. {
  100. enable_branch_tracing(tr);
  101. }
  102. static void stop_branch_trace(struct trace_array *tr)
  103. {
  104. disable_branch_tracing();
  105. }
  106. static int branch_trace_init(struct trace_array *tr)
  107. {
  108. int cpu;
  109. for_each_online_cpu(cpu)
  110. tracing_reset(tr, cpu);
  111. start_branch_trace(tr);
  112. return 0;
  113. }
  114. static void branch_trace_reset(struct trace_array *tr)
  115. {
  116. stop_branch_trace(tr);
  117. }
  118. static int
  119. trace_print_print(struct trace_seq *s, struct trace_entry *entry, int flags)
  120. {
  121. struct print_entry *field;
  122. trace_assign_type(field, entry);
  123. if (seq_print_ip_sym(s, field->ip, flags))
  124. goto partial;
  125. if (trace_seq_printf(s, ": %s", field->buf))
  126. goto partial;
  127. partial:
  128. return TRACE_TYPE_PARTIAL_LINE;
  129. }
  130. static int
  131. trace_branch_print(struct trace_seq *s, struct trace_entry *entry, int flags)
  132. {
  133. struct trace_branch *field;
  134. trace_assign_type(field, entry);
  135. if (trace_seq_printf(s, "[%s] %s:%s:%d\n",
  136. field->correct ? " ok " : " MISS ",
  137. field->func,
  138. field->file,
  139. field->line))
  140. return TRACE_TYPE_PARTIAL_LINE;
  141. return 0;
  142. }
/*
 * Output callbacks for TRACE_BRANCH entries.  Normal and latency output
 * share one formatter; raw/hex/binary output is not supported for this
 * event type.
 */
static struct trace_event trace_branch_event = {
	.type = TRACE_BRANCH,
	.trace = trace_branch_print,
	.latency_trace = trace_branch_print,
	.raw = trace_nop_print,
	.hex = trace_nop_print,
	.binary = trace_nop_print,
};
/* The "branch" tracer: streams annotated-branch outcomes to the ring buffer. */
struct tracer branch_trace __read_mostly =
{
	.name = "branch",
	.init = branch_trace_init,
	.reset = branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_branch,
#endif
};
  160. __init static int init_branch_trace(void)
  161. {
  162. int ret;
  163. ret = register_ftrace_event(&trace_branch_event);
  164. if (!ret) {
  165. printk(KERN_WARNING "Warning: could not register branch events\n");
  166. return 1;
  167. }
  168. return register_tracer(&branch_trace);
  169. }
  170. device_initcall(init_branch_trace);
  171. #else
/* CONFIG_BRANCH_TRACER disabled: the annotation hook compiles to a no-op. */
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
  176. #endif /* CONFIG_BRANCH_TRACER */
  177. void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
  178. {
  179. /*
  180. * I would love to have a trace point here instead, but the
  181. * trace point code is so inundated with unlikely and likely
  182. * conditions that the recursive nightmare that exists is too
  183. * much to try to get working. At least for now.
  184. */
  185. trace_likely_condition(f, val, expect);
  186. /* FIXME: Make this atomic! */
  187. if (val == expect)
  188. f->correct++;
  189. else
  190. f->incorrect++;
  191. }
  192. EXPORT_SYMBOL(ftrace_likely_update);
/* Bounds of one branch-profile table, handed to the seq_file iterator. */
struct ftrace_pointer {
	void *start;	/* first ftrace_branch_data record */
	void *stop;	/* one past the last record */
	int hit;	/* non-zero: t_show() prints the "miss/hit" header */
};
  198. static void *
  199. t_next(struct seq_file *m, void *v, loff_t *pos)
  200. {
  201. const struct ftrace_pointer *f = m->private;
  202. struct ftrace_branch_data *p = v;
  203. (*pos)++;
  204. if (v == (void *)1)
  205. return f->start;
  206. ++p;
  207. if ((void *)p >= (void *)f->stop)
  208. return NULL;
  209. return p;
  210. }
  211. static void *t_start(struct seq_file *m, loff_t *pos)
  212. {
  213. void *t = (void *)1;
  214. loff_t l = 0;
  215. for (; t && l < *pos; t = t_next(m, t, &l))
  216. ;
  217. return t;
  218. }
/* Nothing to release per read; required member of seq_operations. */
static void t_stop(struct seq_file *m, void *p)
{
}
/*
 * t_show - emit the header or one row of the branch-profile table
 *
 * For the header token (void *)1 the column banner is printed; the
 * label set depends on fp->hit (all-branches vs. annotated-only view).
 * For a record, prints counters, the ratio column, and func/file/line.
 */
static int t_show(struct seq_file *m, void *v)
{
	const struct ftrace_pointer *fp = m->private;
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	if (v == (void *)1) {
		/*
		 * NOTE(review): a literal '%' in a printf-style format
		 * string should be written "%%" — verify these header
		 * strings against the original source; extraction may
		 * have collapsed doubled characters and column padding.
		 */
		if (fp->hit)
			seq_printf(m, " miss hit % ");
		else
			seq_printf(m, " correct incorrect % ");
		seq_printf(m, " Function "
			" File Line\n"
			" ------- --------- - "
			" -------- "
			" ---- ----\n");
		return 0;
	}

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	/*
	 * The miss is overlayed on correct, and hit on incorrect.
	 */
	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		/* No correct hits: 100% wrong, or -1 ("X") if never taken. */
		percent = p->incorrect ? 100 : -1;

	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
	if (percent < 0)
		seq_printf(m, " X ");
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}
/* seq_file iterator over one branch-profile section (see struct ftrace_pointer). */
static struct seq_operations tracing_likely_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
  267. static int tracing_branch_open(struct inode *inode, struct file *file)
  268. {
  269. int ret;
  270. ret = seq_open(file, &tracing_likely_seq_ops);
  271. if (!ret) {
  272. struct seq_file *m = file->private_data;
  273. m->private = (void *)inode->i_private;
  274. }
  275. return ret;
  276. }
  277. static const struct file_operations tracing_branch_fops = {
  278. .open = tracing_branch_open,
  279. .read = seq_read,
  280. .llseek = seq_lseek,
  281. };
#ifdef CONFIG_PROFILE_ALL_BRANCHES
/* Bounds of the all-branches table — presumably linker-provided section
 * markers; confirm against the vmlinux linker script. */
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];
/* Iterator descriptor for "profile_branch"; hit=1 selects the miss/hit header. */
static const struct ftrace_pointer ftrace_branch_pos = {
	.start = __start_branch_profile,
	.stop = __stop_branch_profile,
	.hit = 1,
};
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
/* Bounds of the annotated (likely/unlikely) table — presumably
 * linker-provided section markers; confirm against the linker script. */
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];
/* Iterator descriptor for "profile_annotated_branch" (hit defaults to 0). */
static const struct ftrace_pointer ftrace_annotated_branch_pos = {
	.start = __start_annotated_branch_profile,
	.stop = __stop_annotated_branch_profile,
};
  297. static __init int ftrace_branch_init(void)
  298. {
  299. struct dentry *d_tracer;
  300. struct dentry *entry;
  301. d_tracer = tracing_init_dentry();
  302. entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
  303. (void *)&ftrace_annotated_branch_pos,
  304. &tracing_branch_fops);
  305. if (!entry)
  306. pr_warning("Could not create debugfs "
  307. "'profile_annotatet_branch' entry\n");
  308. #ifdef CONFIG_PROFILE_ALL_BRANCHES
  309. entry = debugfs_create_file("profile_branch", 0444, d_tracer,
  310. (void *)&ftrace_branch_pos,
  311. &tracing_branch_fops);
  312. if (!entry)
  313. pr_warning("Could not create debugfs"
  314. " 'profile_branch' entry\n");
  315. #endif
  316. return 0;
  317. }
  318. device_initcall(ftrace_branch_init);