trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace_events.h"
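
/*
 * event_trace_printk - write a printk-style message into the trace
 * buffer at the given instruction pointer, recording the current task's
 * cmdline so its comm can be resolved in the trace output.
 */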
void event_trace_printk(unsigned long ip, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	tracing_record_cmdline(current);
	trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
	va_end(ap);
}
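
/*
 * ftrace_clear_events - disable every event that is currently enabled.
 * The events live in a linker-generated table bounded by
 * __start_ftrace_events and __stop_ftrace_events.
 */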
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call = (void *)__start_ftrace_events;

	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		call++;
	}
}
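
/*
 * ftrace_set_clr_event - look up an event by name and enable (set != 0)
 * or disable it, calling its regfunc/unregfunc as needed.  Returns 0 on
 * success or -EINVAL if no event with that name exists.
 */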
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call = (void *)__start_ftrace_events;

	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {

		if (strcmp(buf, call->name) != 0) {
			call++;
			continue;
		}

		if (set) {
			/* Already set? */
			if (call->enabled)
				return 0;
			call->enabled = 1;
			call->regfunc();
		} else {
			/* Already cleared? */
			if (!call->enabled)
				return 0;
			call->enabled = 0;
			call->unregfunc();
		}
		return 0;
	}
	return -EINVAL;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127
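
/*
 * ftrace_event_write - parse a write to the set_event file.  Leading
 * white space is skipped, the event name is copied into a temporary
 * buffer (at most EVENT_BUF_SIZE characters), and a leading '!' requests
 * that the named event be disabled instead of enabled.
 */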
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
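
/*
 * Iterator callbacks for the available_events seq_file: walk every entry
 * in the event table, one struct ftrace_event_call per step.
 */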
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next = call;

	(*pos)++;

	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	m->private = ++next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}
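
/*
 * Iterator callbacks for the set_event seq_file: same walk as above, but
 * entries that are not enabled are skipped.
 */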
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next;

	(*pos)++;

 retry:
	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	if (!call->enabled) {
		call++;
		goto retry;
	}

	next = call;
	m->private = ++next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}
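
/*
 * t_show/t_stop are shared by both seq_files: print one event name per
 * line; nothing to tear down when iteration stops.
 */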
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}
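
/*
 * ftrace_event_seq_open - common open routine for both debugfs files.
 * Opening for write without O_APPEND first disables all events; the
 * seq_operations to use are taken from the inode's i_private, which is
 * set up in event_trace_init().
 */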
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = __start_ftrace_events;
	}
	return ret;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};
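
/*
 * event_trace_init - create the available_events (read-only) and
 * set_event (read-write) files in the tracing debugfs directory.
 */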
static __init int event_trace_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	return 0;
}

fs_initcall(event_trace_init);