/* trace_unlikely.c */
  1. /*
  2. * unlikely profiler
  3. *
  4. * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  5. */
  6. #include <linux/kallsyms.h>
  7. #include <linux/seq_file.h>
  8. #include <linux/spinlock.h>
  9. #include <linux/debugfs.h>
  10. #include <linux/uaccess.h>
  11. #include <linux/module.h>
  12. #include <linux/ftrace.h>
  13. #include <linux/hash.h>
  14. #include <linux/fs.h>
  15. #include <asm/local.h>
  16. #include "trace.h"
#ifdef CONFIG_UNLIKELY_TRACER

/* Non-zero while a tracer instance has unlikely tracing switched on. */
static int unlikely_tracing_enabled __read_mostly;
/* Serializes enable_unlikely_tracing() against disable_unlikely_tracing(). */
static DEFINE_MUTEX(unlikely_tracing_mutex);
/* Trace array events are recorded into; NULL until tracing is enabled. */
static struct trace_array *unlikely_tracer;
  21. static void
  22. probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
  23. {
  24. struct trace_array *tr = unlikely_tracer;
  25. struct ring_buffer_event *event;
  26. struct trace_unlikely *entry;
  27. unsigned long flags, irq_flags;
  28. int cpu, pc;
  29. const char *p;
  30. /*
  31. * I would love to save just the ftrace_likely_data pointer, but
  32. * this code can also be used by modules. Ugly things can happen
  33. * if the module is unloaded, and then we go and read the
  34. * pointer. This is slower, but much safer.
  35. */
  36. if (unlikely(!tr))
  37. return;
  38. local_irq_save(flags);
  39. cpu = raw_smp_processor_id();
  40. if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
  41. goto out;
  42. event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
  43. &irq_flags);
  44. if (!event)
  45. goto out;
  46. pc = preempt_count();
  47. entry = ring_buffer_event_data(event);
  48. tracing_generic_entry_update(&entry->ent, flags, pc);
  49. entry->ent.type = TRACE_UNLIKELY;
  50. /* Strip off the path, only save the file */
  51. p = f->file + strlen(f->file);
  52. while (p >= f->file && *p != '/')
  53. p--;
  54. p++;
  55. strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
  56. strncpy(entry->file, p, TRACE_FILE_SIZE);
  57. entry->func[TRACE_FUNC_SIZE] = 0;
  58. entry->file[TRACE_FILE_SIZE] = 0;
  59. entry->line = f->line;
  60. entry->correct = val == expect;
  61. ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
  62. out:
  63. atomic_dec(&tr->data[cpu]->disabled);
  64. local_irq_restore(flags);
  65. }
  66. static inline
  67. void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
  68. {
  69. if (!unlikely_tracing_enabled)
  70. return;
  71. probe_likely_condition(f, val, expect);
  72. }
  73. int enable_unlikely_tracing(struct trace_array *tr)
  74. {
  75. int ret = 0;
  76. mutex_lock(&unlikely_tracing_mutex);
  77. unlikely_tracer = tr;
  78. /*
  79. * Must be seen before enabling. The reader is a condition
  80. * where we do not need a matching rmb()
  81. */
  82. smp_wmb();
  83. unlikely_tracing_enabled++;
  84. mutex_unlock(&unlikely_tracing_mutex);
  85. return ret;
  86. }
  87. void disable_unlikely_tracing(void)
  88. {
  89. mutex_lock(&unlikely_tracing_mutex);
  90. if (!unlikely_tracing_enabled)
  91. goto out_unlock;
  92. unlikely_tracing_enabled--;
  93. out_unlock:
  94. mutex_unlock(&unlikely_tracing_mutex);
  95. }
  96. #else
/* Tracer compiled out: branch evaluations are counted but never traced. */
static inline
void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{
}
  101. #endif /* CONFIG_UNLIKELY_TRACER */
/*
 * Called from the likely()/unlikely() instrumentation for every
 * annotated branch: bump the per-site hit/miss counters and, when the
 * tracer is built in and enabled, emit a trace event.
 *
 * @f:      per-call-site profile record
 * @val:    value the condition actually evaluated to
 * @expect: value the annotation predicted
 */
void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! (non-atomic ++ can lose counts under SMP) */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);
/* Half-open range [start, stop) of one profile section in the image. */
struct ftrace_pointer {
	void *start;
	void *stop;
};
  122. static void *
  123. t_next(struct seq_file *m, void *v, loff_t *pos)
  124. {
  125. struct ftrace_pointer *f = m->private;
  126. struct ftrace_likely_data *p = v;
  127. (*pos)++;
  128. if (v == (void *)1)
  129. return f->start;
  130. ++p;
  131. if ((void *)p >= (void *)f->stop)
  132. return NULL;
  133. return p;
  134. }
  135. static void *t_start(struct seq_file *m, loff_t *pos)
  136. {
  137. void *t = (void *)1;
  138. loff_t l = 0;
  139. for (; t && l < *pos; t = t_next(m, t, &l))
  140. ;
  141. return t;
  142. }
/* seq_file stop: nothing to release, iteration takes no locks. */
static void t_stop(struct seq_file *m, void *p)
{
}
  146. static int t_show(struct seq_file *m, void *v)
  147. {
  148. struct ftrace_likely_data *p = v;
  149. const char *f;
  150. unsigned long percent;
  151. if (v == (void *)1) {
  152. seq_printf(m, " correct incorrect %% "
  153. " Function "
  154. " File Line\n"
  155. " ------- --------- - "
  156. " -------- "
  157. " ---- ----\n");
  158. return 0;
  159. }
  160. /* Only print the file, not the path */
  161. f = p->file + strlen(p->file);
  162. while (f >= p->file && *f != '/')
  163. f--;
  164. f++;
  165. if (p->correct) {
  166. percent = p->incorrect * 100;
  167. percent /= p->correct + p->incorrect;
  168. } else
  169. percent = p->incorrect ? 100 : 0;
  170. seq_printf(m, "%8lu %8lu %3lu ", p->correct, p->incorrect, percent);
  171. seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
  172. return 0;
  173. }
/* seq_file callbacks shared by profile_likely and profile_unlikely. */
static struct seq_operations tracing_likely_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
  180. static int tracing_likely_open(struct inode *inode, struct file *file)
  181. {
  182. int ret;
  183. ret = seq_open(file, &tracing_likely_seq_ops);
  184. if (!ret) {
  185. struct seq_file *m = file->private_data;
  186. m->private = (void *)inode->i_private;
  187. }
  188. return ret;
  189. }
/* File operations for both debugfs profile files. */
static struct file_operations tracing_likely_fops = {
	.open		= tracing_likely_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
};
/* Section bounds laid down by the linker script for annotated branches. */
extern unsigned long __start_likely_profile[];
extern unsigned long __stop_likely_profile[];
extern unsigned long __start_unlikely_profile[];
extern unsigned long __stop_unlikely_profile[];

/* Iteration range handed to the seq_file code via debugfs i_private. */
static struct ftrace_pointer ftrace_likely_pos = {
	.start			= __start_likely_profile,
	.stop			= __stop_likely_profile,
};

static struct ftrace_pointer ftrace_unlikely_pos = {
	.start			= __start_unlikely_profile,
	.stop			= __stop_unlikely_profile,
};
  207. static __init int ftrace_unlikely_init(void)
  208. {
  209. struct dentry *d_tracer;
  210. struct dentry *entry;
  211. d_tracer = tracing_init_dentry();
  212. entry = debugfs_create_file("profile_likely", 0444, d_tracer,
  213. &ftrace_likely_pos,
  214. &tracing_likely_fops);
  215. if (!entry)
  216. pr_warning("Could not create debugfs 'profile_likely' entry\n");
  217. entry = debugfs_create_file("profile_unlikely", 0444, d_tracer,
  218. &ftrace_unlikely_pos,
  219. &tracing_likely_fops);
  220. if (!entry)
  221. pr_warning("Could not create debugfs"
  222. " 'profile_unlikely' entry\n");
  223. return 0;
  224. }
  225. device_initcall(ftrace_unlikely_init);