/* trace_stat.c */
  1. /*
  2. * Infrastructure for statistic tracing (histogram output).
  3. *
  4. * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
  5. *
  6. * Based on the code from trace_branch.c which is
  7. * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
  8. *
  9. */
  10. #include <linux/list.h>
  11. #include <linux/seq_file.h>
  12. #include <linux/debugfs.h>
  13. #include "trace.h"
/* List of stat entries from a tracer */
struct trace_stat_list {
	struct list_head list;	/* links into the global stat_list */
	void *stat;		/* opaque per-entry cookie owned by the tracer */
};

/*
 * Head of the per-session snapshot list.  Only its ->list member is
 * used; the head's ->stat is never read.
 */
static struct trace_stat_list stat_list;

/*
 * This is a copy of the current tracer to avoid racy
 * and dangerous output while the current tracer is
 * switched.
 */
static struct tracer current_tracer;

/*
 * Protect both the current tracer and the global
 * stat list.
 */
static DEFINE_MUTEX(stat_list_mutex);
  31. static void reset_stat_list(void)
  32. {
  33. struct trace_stat_list *node;
  34. struct list_head *next;
  35. if (list_empty(&stat_list.list))
  36. return;
  37. node = list_entry(stat_list.list.next, struct trace_stat_list, list);
  38. next = node->list.next;
  39. while (&node->list != next) {
  40. kfree(node);
  41. node = list_entry(next, struct trace_stat_list, list);
  42. }
  43. kfree(node);
  44. INIT_LIST_HEAD(&stat_list.list);
  45. }
  46. void init_tracer_stat(struct tracer *trace)
  47. {
  48. mutex_lock(&stat_list_mutex);
  49. current_tracer = *trace;
  50. mutex_unlock(&stat_list_mutex);
  51. }
  52. /*
  53. * For tracers that don't provide a stat_cmp callback.
  54. * This one will force an immediate insertion on tail of
  55. * the list.
  56. */
  57. static int dummy_cmp(void *p1, void *p2)
  58. {
  59. return 1;
  60. }
  61. /*
  62. * Initialize the stat list at each trace_stat file opening.
  63. * All of these copies and sorting are required on all opening
  64. * since the stats could have changed between two file sessions.
  65. */
  66. static int stat_seq_init(void)
  67. {
  68. struct trace_stat_list *iter_entry, *new_entry;
  69. void *prev_stat;
  70. int ret = 0;
  71. int i;
  72. mutex_lock(&stat_list_mutex);
  73. reset_stat_list();
  74. if (!current_tracer.stat_start || !current_tracer.stat_next ||
  75. !current_tracer.stat_show)
  76. goto exit;
  77. if (!current_tracer.stat_cmp)
  78. current_tracer.stat_cmp = dummy_cmp;
  79. /*
  80. * The first entry. Actually this is the second, but the first
  81. * one (the stat_list head) is pointless.
  82. */
  83. new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
  84. if (!new_entry) {
  85. ret = -ENOMEM;
  86. goto exit;
  87. }
  88. INIT_LIST_HEAD(&new_entry->list);
  89. list_add(&new_entry->list, &stat_list.list);
  90. new_entry->stat = current_tracer.stat_start();
  91. prev_stat = new_entry->stat;
  92. /*
  93. * Iterate over the tracer stat entries and store them in a sorted
  94. * list.
  95. */
  96. for (i = 1; ; i++) {
  97. new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
  98. if (!new_entry) {
  99. ret = -ENOMEM;
  100. goto exit_free_list;
  101. }
  102. INIT_LIST_HEAD(&new_entry->list);
  103. new_entry->stat = current_tracer.stat_next(prev_stat, i);
  104. /* End of insertion */
  105. if (!new_entry->stat)
  106. break;
  107. list_for_each_entry(iter_entry, &stat_list.list, list) {
  108. /* Insertion with a descendent sorting */
  109. if (current_tracer.stat_cmp(new_entry->stat,
  110. iter_entry->stat) > 0) {
  111. list_add_tail(&new_entry->list,
  112. &iter_entry->list);
  113. break;
  114. /* The current smaller value */
  115. } else if (list_is_last(&iter_entry->list,
  116. &stat_list.list)) {
  117. list_add(&new_entry->list, &iter_entry->list);
  118. break;
  119. }
  120. }
  121. prev_stat = new_entry->stat;
  122. }
  123. exit:
  124. mutex_unlock(&stat_list_mutex);
  125. return ret;
  126. exit_free_list:
  127. reset_stat_list();
  128. mutex_unlock(&stat_list_mutex);
  129. return ret;
  130. }
  131. static void *stat_seq_start(struct seq_file *s, loff_t *pos)
  132. {
  133. struct trace_stat_list *l = (struct trace_stat_list *)s->private;
  134. /* Prevent from tracer switch or stat_list modification */
  135. mutex_lock(&stat_list_mutex);
  136. /* If we are in the beginning of the file, print the headers */
  137. if (!*pos && current_tracer.stat_headers)
  138. current_tracer.stat_headers(s);
  139. return seq_list_start(&l->list, *pos);
  140. }
  141. static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
  142. {
  143. struct trace_stat_list *l = (struct trace_stat_list *)s->private;
  144. return seq_list_next(p, &l->list, pos);
  145. }
  146. static void stat_seq_stop(struct seq_file *m, void *p)
  147. {
  148. mutex_unlock(&stat_list_mutex);
  149. }
  150. static int stat_seq_show(struct seq_file *s, void *v)
  151. {
  152. struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);
  153. return current_tracer.stat_show(s, l->stat);
  154. }
/* seq_file iteration callbacks for the trace_stat debugfs file */
static const struct seq_operations trace_stat_seq_ops = {
	.start = stat_seq_start,
	.next = stat_seq_next,
	.stop = stat_seq_stop,
	.show = stat_seq_show
};
  161. static int tracing_stat_open(struct inode *inode, struct file *file)
  162. {
  163. int ret;
  164. ret = seq_open(file, &trace_stat_seq_ops);
  165. if (!ret) {
  166. struct seq_file *m = file->private_data;
  167. m->private = &stat_list;
  168. ret = stat_seq_init();
  169. }
  170. return ret;
  171. }
  172. /*
  173. * Avoid consuming memory with our now useless list.
  174. */
  175. static int tracing_stat_release(struct inode *i, struct file *f)
  176. {
  177. mutex_lock(&stat_list_mutex);
  178. reset_stat_list();
  179. mutex_unlock(&stat_list_mutex);
  180. return 0;
  181. }
/* File operations for the trace_stat debugfs entry */
static const struct file_operations tracing_stat_fops = {
	.open = tracing_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_stat_release
};
  188. static int __init tracing_stat_init(void)
  189. {
  190. struct dentry *d_tracing;
  191. struct dentry *entry;
  192. INIT_LIST_HEAD(&stat_list.list);
  193. d_tracing = tracing_init_dentry();
  194. entry = debugfs_create_file("trace_stat", 0444, d_tracing,
  195. NULL,
  196. &tracing_stat_fops);
  197. if (!entry)
  198. pr_warning("Could not create debugfs "
  199. "'trace_stat' entry\n");
  200. return 0;
  201. }
  202. fs_initcall(tracing_stat_init);