trace_events.c 7.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407
  1. /*
  2. * event tracer
  3. *
  4. * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  5. *
  6. */
  7. #include <linux/debugfs.h>
  8. #include <linux/uaccess.h>
  9. #include <linux/module.h>
  10. #include <linux/ctype.h>
  11. #include "trace_events.h"
/*
 * Walk every ftrace_event_call that the build placed between the
 * linker-provided symbols __start_ftrace_events and
 * __stop_ftrace_events.  'event' is the cursor variable supplied by
 * the caller.
 */
#define events_for_each(event) \
	for (event = __start_ftrace_events; \
	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
	     event++)
  16. void event_trace_printk(unsigned long ip, const char *fmt, ...)
  17. {
  18. va_list ap;
  19. va_start(ap, fmt);
  20. tracing_record_cmdline(current);
  21. trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
  22. va_end(ap);
  23. }
  24. static void ftrace_clear_events(void)
  25. {
  26. struct ftrace_event_call *call = (void *)__start_ftrace_events;
  27. while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
  28. if (call->enabled) {
  29. call->enabled = 0;
  30. call->unregfunc();
  31. }
  32. call++;
  33. }
  34. }
  35. static int ftrace_set_clr_event(char *buf, int set)
  36. {
  37. struct ftrace_event_call *call = __start_ftrace_events;
  38. events_for_each(call) {
  39. if (!call->name)
  40. continue;
  41. if (strcmp(buf, call->name) != 0)
  42. continue;
  43. if (set) {
  44. /* Already set? */
  45. if (call->enabled)
  46. return 0;
  47. call->enabled = 1;
  48. call->regfunc();
  49. } else {
  50. /* Already cleared? */
  51. if (!call->enabled)
  52. return 0;
  53. call->enabled = 0;
  54. call->unregfunc();
  55. }
  56. return 0;
  57. }
  58. return -EINVAL;
  59. }
  60. /* 128 should be much more than enough */
  61. #define EVENT_BUF_SIZE 127
  62. static ssize_t
  63. ftrace_event_write(struct file *file, const char __user *ubuf,
  64. size_t cnt, loff_t *ppos)
  65. {
  66. size_t read = 0;
  67. int i, set = 1;
  68. ssize_t ret;
  69. char *buf;
  70. char ch;
  71. if (!cnt || cnt < 0)
  72. return 0;
  73. ret = get_user(ch, ubuf++);
  74. if (ret)
  75. return ret;
  76. read++;
  77. cnt--;
  78. /* skip white space */
  79. while (cnt && isspace(ch)) {
  80. ret = get_user(ch, ubuf++);
  81. if (ret)
  82. return ret;
  83. read++;
  84. cnt--;
  85. }
  86. /* Only white space found? */
  87. if (isspace(ch)) {
  88. file->f_pos += read;
  89. ret = read;
  90. return ret;
  91. }
  92. buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
  93. if (!buf)
  94. return -ENOMEM;
  95. if (cnt > EVENT_BUF_SIZE)
  96. cnt = EVENT_BUF_SIZE;
  97. i = 0;
  98. while (cnt && !isspace(ch)) {
  99. if (!i && ch == '!')
  100. set = 0;
  101. else
  102. buf[i++] = ch;
  103. ret = get_user(ch, ubuf++);
  104. if (ret)
  105. goto out_free;
  106. read++;
  107. cnt--;
  108. }
  109. buf[i] = 0;
  110. file->f_pos += read;
  111. ret = ftrace_set_clr_event(buf, set);
  112. if (ret)
  113. goto out_free;
  114. ret = read;
  115. out_free:
  116. kfree(buf);
  117. return ret;
  118. }
  119. static void *
  120. t_next(struct seq_file *m, void *v, loff_t *pos)
  121. {
  122. struct ftrace_event_call *call = m->private;
  123. struct ftrace_event_call *next = call;
  124. (*pos)++;
  125. if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
  126. return NULL;
  127. m->private = ++next;
  128. return call;
  129. }
static void *t_start(struct seq_file *m, loff_t *pos)
{
	/*
	 * NOTE(review): resumes from whatever m->private points at
	 * rather than seeking to *pos as seq_file start() normally
	 * would -- relies on ftrace_event_seq_open() having reset
	 * m->private to __start_ftrace_events.  Confirm this holds for
	 * partial reads.
	 */
	return t_next(m, NULL, pos);
}
  134. static void *
  135. s_next(struct seq_file *m, void *v, loff_t *pos)
  136. {
  137. struct ftrace_event_call *call = m->private;
  138. struct ftrace_event_call *next;
  139. (*pos)++;
  140. retry:
  141. if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
  142. return NULL;
  143. if (!call->enabled) {
  144. call++;
  145. goto retry;
  146. }
  147. next = call;
  148. m->private = ++next;
  149. return call;
  150. }
static void *s_start(struct seq_file *m, loff_t *pos)
{
	/*
	 * NOTE(review): same caveat as t_start() -- resumes from
	 * m->private instead of honoring *pos; depends on open()
	 * resetting the cursor.
	 */
	return s_next(m, NULL, pos);
}
  155. static int t_show(struct seq_file *m, void *v)
  156. {
  157. struct ftrace_event_call *call = v;
  158. seq_printf(m, "%s\n", call->name);
  159. return 0;
  160. }
/* Nothing to release: iteration state lives entirely in m->private. */
static void t_stop(struct seq_file *m, void *p)
{
}
  164. static int
  165. ftrace_event_seq_open(struct inode *inode, struct file *file)
  166. {
  167. int ret;
  168. const struct seq_operations *seq_ops;
  169. if ((file->f_mode & FMODE_WRITE) &&
  170. !(file->f_flags & O_APPEND))
  171. ftrace_clear_events();
  172. seq_ops = inode->i_private;
  173. ret = seq_open(file, seq_ops);
  174. if (!ret) {
  175. struct seq_file *m = file->private_data;
  176. m->private = __start_ftrace_events;
  177. }
  178. return ret;
  179. }
  180. static ssize_t
  181. event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
  182. loff_t *ppos)
  183. {
  184. struct ftrace_event_call *call = filp->private_data;
  185. char *buf;
  186. if (call->enabled)
  187. buf = "1\n";
  188. else
  189. buf = "0\n";
  190. return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
  191. }
  192. static ssize_t
  193. event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
  194. loff_t *ppos)
  195. {
  196. struct ftrace_event_call *call = filp->private_data;
  197. char buf[64];
  198. unsigned long val;
  199. int ret;
  200. if (cnt >= sizeof(buf))
  201. return -EINVAL;
  202. if (copy_from_user(&buf, ubuf, cnt))
  203. return -EFAULT;
  204. buf[cnt] = 0;
  205. ret = strict_strtoul(buf, 10, &val);
  206. if (ret < 0)
  207. return ret;
  208. switch (val) {
  209. case 0:
  210. if (!call->enabled)
  211. break;
  212. call->enabled = 0;
  213. call->unregfunc();
  214. break;
  215. case 1:
  216. if (call->enabled)
  217. break;
  218. call->enabled = 1;
  219. call->regfunc();
  220. break;
  221. default:
  222. return -EINVAL;
  223. }
  224. *ppos += cnt;
  225. return cnt;
  226. }
/* seq_file ops for 'available_events': walks every event */
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

/* seq_file ops for 'set_event': walks only enabled events */
static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};
/* debugfs 'available_events': read-only listing of all events */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* debugfs 'set_event': lists enabled events, writable to change them */
static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* per-event 'enable' file under events/<name>/ */
static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};
  257. static struct dentry *event_trace_events_dir(void)
  258. {
  259. static struct dentry *d_tracer;
  260. static struct dentry *d_events;
  261. if (d_events)
  262. return d_events;
  263. d_tracer = tracing_init_dentry();
  264. if (!d_tracer)
  265. return NULL;
  266. d_events = debugfs_create_dir("events", d_tracer);
  267. if (!d_events)
  268. pr_warning("Could not create debugfs "
  269. "'events' directory\n");
  270. return d_events;
  271. }
  272. static int
  273. event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
  274. {
  275. struct dentry *entry;
  276. call->dir = debugfs_create_dir(call->name, d_events);
  277. if (!call->dir) {
  278. pr_warning("Could not create debugfs "
  279. "'%s' directory\n", call->name);
  280. return -1;
  281. }
  282. entry = debugfs_create_file("enable", 0644, call->dir, call,
  283. &ftrace_enable_fops);
  284. if (!entry)
  285. pr_warning("Could not create debugfs "
  286. "'%s/enable' entry\n", call->name);
  287. return 0;
  288. }
  289. static __init int event_trace_init(void)
  290. {
  291. struct ftrace_event_call *call = __start_ftrace_events;
  292. struct dentry *d_tracer;
  293. struct dentry *entry;
  294. struct dentry *d_events;
  295. d_tracer = tracing_init_dentry();
  296. if (!d_tracer)
  297. return 0;
  298. entry = debugfs_create_file("available_events", 0444, d_tracer,
  299. (void *)&show_event_seq_ops,
  300. &ftrace_avail_fops);
  301. if (!entry)
  302. pr_warning("Could not create debugfs "
  303. "'available_events' entry\n");
  304. entry = debugfs_create_file("set_event", 0644, d_tracer,
  305. (void *)&show_set_event_seq_ops,
  306. &ftrace_set_event_fops);
  307. if (!entry)
  308. pr_warning("Could not create debugfs "
  309. "'set_event' entry\n");
  310. d_events = event_trace_events_dir();
  311. if (!d_events)
  312. return 0;
  313. events_for_each(call) {
  314. /* The linker may leave blanks */
  315. if (!call->name)
  316. continue;
  317. event_create_dir(call, d_events);
  318. }
  319. return 0;
  320. }
  321. fs_initcall(event_trace_init);