/* trace_events.c */
  1. /*
  2. * event tracer
  3. *
  4. * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  5. *
  6. * - Added format output of fields of the trace point.
  7. * This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
  8. *
  9. */
  10. #include <linux/debugfs.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/module.h>
  13. #include <linux/ctype.h>
  14. #include "trace_output.h"
/* Default system name used when an event header did not define TRACE_SYSTEM. */
#define TRACE_SYSTEM "TRACE_SYSTEM"

/* Serializes all event enable/disable state changes. */
static DEFINE_MUTEX(event_mutex);

/*
 * Iterate over every ftrace_event_call that the event macros placed in
 * the dedicated linker section, bounded by the linker-provided symbols
 * __start_ftrace_events / __stop_ftrace_events.
 */
#define events_for_each(event) \
	for (event = __start_ftrace_events; \
	     (unsigned long)event < (unsigned long)__stop_ftrace_events; \
	     event++)
  21. void event_trace_printk(unsigned long ip, const char *fmt, ...)
  22. {
  23. va_list ap;
  24. va_start(ap, fmt);
  25. tracing_record_cmdline(current);
  26. trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap);
  27. va_end(ap);
  28. }
  29. static void ftrace_clear_events(void)
  30. {
  31. struct ftrace_event_call *call = (void *)__start_ftrace_events;
  32. while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {
  33. if (call->enabled) {
  34. call->enabled = 0;
  35. call->unregfunc();
  36. }
  37. call++;
  38. }
  39. }
  40. static void ftrace_event_enable_disable(struct ftrace_event_call *call,
  41. int enable)
  42. {
  43. switch (enable) {
  44. case 0:
  45. if (call->enabled) {
  46. call->enabled = 0;
  47. call->unregfunc();
  48. }
  49. break;
  50. case 1:
  51. if (!call->enabled) {
  52. call->enabled = 1;
  53. call->regfunc();
  54. }
  55. break;
  56. }
  57. }
  58. static int ftrace_set_clr_event(char *buf, int set)
  59. {
  60. struct ftrace_event_call *call = __start_ftrace_events;
  61. char *event = NULL, *sub = NULL, *match;
  62. int ret = -EINVAL;
  63. /*
  64. * The buf format can be <subsystem>:<event-name>
  65. * *:<event-name> means any event by that name.
  66. * :<event-name> is the same.
  67. *
  68. * <subsystem>:* means all events in that subsystem
  69. * <subsystem>: means the same.
  70. *
  71. * <name> (no ':') means all events in a subsystem with
  72. * the name <name> or any event that matches <name>
  73. */
  74. match = strsep(&buf, ":");
  75. if (buf) {
  76. sub = match;
  77. event = buf;
  78. match = NULL;
  79. if (!strlen(sub) || strcmp(sub, "*") == 0)
  80. sub = NULL;
  81. if (!strlen(event) || strcmp(event, "*") == 0)
  82. event = NULL;
  83. }
  84. mutex_lock(&event_mutex);
  85. events_for_each(call) {
  86. if (!call->name || !call->regfunc)
  87. continue;
  88. if (match &&
  89. strcmp(match, call->name) != 0 &&
  90. strcmp(match, call->system) != 0)
  91. continue;
  92. if (sub && strcmp(sub, call->system) != 0)
  93. continue;
  94. if (event && strcmp(event, call->name) != 0)
  95. continue;
  96. ftrace_event_enable_disable(call, set);
  97. ret = 0;
  98. }
  99. mutex_unlock(&event_mutex);
  100. return ret;
  101. }
/* Accept specs of up to 127 chars; the buffer is EVENT_BUF_SIZE + 1 for the NUL */
#define EVENT_BUF_SIZE 127
  104. static ssize_t
  105. ftrace_event_write(struct file *file, const char __user *ubuf,
  106. size_t cnt, loff_t *ppos)
  107. {
  108. size_t read = 0;
  109. int i, set = 1;
  110. ssize_t ret;
  111. char *buf;
  112. char ch;
  113. if (!cnt || cnt < 0)
  114. return 0;
  115. ret = tracing_update_buffers();
  116. if (ret < 0)
  117. return ret;
  118. ret = get_user(ch, ubuf++);
  119. if (ret)
  120. return ret;
  121. read++;
  122. cnt--;
  123. /* skip white space */
  124. while (cnt && isspace(ch)) {
  125. ret = get_user(ch, ubuf++);
  126. if (ret)
  127. return ret;
  128. read++;
  129. cnt--;
  130. }
  131. /* Only white space found? */
  132. if (isspace(ch)) {
  133. file->f_pos += read;
  134. ret = read;
  135. return ret;
  136. }
  137. buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
  138. if (!buf)
  139. return -ENOMEM;
  140. if (cnt > EVENT_BUF_SIZE)
  141. cnt = EVENT_BUF_SIZE;
  142. i = 0;
  143. while (cnt && !isspace(ch)) {
  144. if (!i && ch == '!')
  145. set = 0;
  146. else
  147. buf[i++] = ch;
  148. ret = get_user(ch, ubuf++);
  149. if (ret)
  150. goto out_free;
  151. read++;
  152. cnt--;
  153. }
  154. buf[i] = 0;
  155. file->f_pos += read;
  156. ret = ftrace_set_clr_event(buf, set);
  157. if (ret)
  158. goto out_free;
  159. ret = read;
  160. out_free:
  161. kfree(buf);
  162. return ret;
  163. }
  164. static void *
  165. t_next(struct seq_file *m, void *v, loff_t *pos)
  166. {
  167. struct ftrace_event_call *call = m->private;
  168. struct ftrace_event_call *next = call;
  169. (*pos)++;
  170. for (;;) {
  171. if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
  172. return NULL;
  173. /*
  174. * The ftrace subsystem is for showing formats only.
  175. * They can not be enabled or disabled via the event files.
  176. */
  177. if (call->regfunc)
  178. break;
  179. call++;
  180. next = call;
  181. }
  182. m->private = ++next;
  183. return call;
  184. }
/*
 * seq_file start: resume from the cursor saved in m->private
 * (the *pos argument is only advanced, not used for seeking).
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}
  189. static void *
  190. s_next(struct seq_file *m, void *v, loff_t *pos)
  191. {
  192. struct ftrace_event_call *call = m->private;
  193. struct ftrace_event_call *next;
  194. (*pos)++;
  195. retry:
  196. if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
  197. return NULL;
  198. if (!call->enabled) {
  199. call++;
  200. goto retry;
  201. }
  202. next = call;
  203. m->private = ++next;
  204. return call;
  205. }
/*
 * seq_file start for "set_event": resume from the cursor saved in
 * m->private (the *pos argument is only advanced, not used for seeking).
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}
  210. static int t_show(struct seq_file *m, void *v)
  211. {
  212. struct ftrace_event_call *call = v;
  213. if (strcmp(call->system, TRACE_SYSTEM) != 0)
  214. seq_printf(m, "%s:", call->system);
  215. seq_printf(m, "%s\n", call->name);
  216. return 0;
  217. }
/* seq_file stop: nothing to release; the cursor lives in m->private. */
static void t_stop(struct seq_file *m, void *p)
{
}
  221. static int
  222. ftrace_event_seq_open(struct inode *inode, struct file *file)
  223. {
  224. int ret;
  225. const struct seq_operations *seq_ops;
  226. if ((file->f_mode & FMODE_WRITE) &&
  227. !(file->f_flags & O_APPEND))
  228. ftrace_clear_events();
  229. seq_ops = inode->i_private;
  230. ret = seq_open(file, seq_ops);
  231. if (!ret) {
  232. struct seq_file *m = file->private_data;
  233. m->private = __start_ftrace_events;
  234. }
  235. return ret;
  236. }
  237. static ssize_t
  238. event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
  239. loff_t *ppos)
  240. {
  241. struct ftrace_event_call *call = filp->private_data;
  242. char *buf;
  243. if (call->enabled)
  244. buf = "1\n";
  245. else
  246. buf = "0\n";
  247. return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
  248. }
  249. static ssize_t
  250. event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
  251. loff_t *ppos)
  252. {
  253. struct ftrace_event_call *call = filp->private_data;
  254. char buf[64];
  255. unsigned long val;
  256. int ret;
  257. if (cnt >= sizeof(buf))
  258. return -EINVAL;
  259. if (copy_from_user(&buf, ubuf, cnt))
  260. return -EFAULT;
  261. buf[cnt] = 0;
  262. ret = strict_strtoul(buf, 10, &val);
  263. if (ret < 0)
  264. return ret;
  265. ret = tracing_update_buffers();
  266. if (ret < 0)
  267. return ret;
  268. switch (val) {
  269. case 0:
  270. case 1:
  271. mutex_lock(&event_mutex);
  272. ftrace_event_enable_disable(call, val);
  273. mutex_unlock(&event_mutex);
  274. break;
  275. default:
  276. return -EINVAL;
  277. }
  278. *ppos += cnt;
  279. return cnt;
  280. }
#undef FIELD
/*
 * FIELD() expands to the four arguments each format line below
 * consumes: the stringified type, the stringified name, the field's
 * offset inside the local `field` variable's type, and its size.
 * NOTE: relies on a variable named `field` being in scope at the
 * expansion site.
 */
#define FIELD(type, name) \
	#type, #name, offsetof(typeof(field), name), sizeof(field.name)

/*
 * Emit the fields of the common struct trace_entry header in the
 * "format" file's field:/offset:/size: layout.  Returns the result
 * of trace_seq_printf (0 when the seq buffer overflowed).
 */
static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned char, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
  301. static ssize_t
  302. event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
  303. loff_t *ppos)
  304. {
  305. struct ftrace_event_call *call = filp->private_data;
  306. struct trace_seq *s;
  307. char *buf;
  308. int r;
  309. s = kmalloc(sizeof(*s), GFP_KERNEL);
  310. if (!s)
  311. return -ENOMEM;
  312. trace_seq_init(s);
  313. if (*ppos)
  314. return 0;
  315. /* If any of the first writes fail, so will the show_format. */
  316. trace_seq_printf(s, "name: %s\n", call->name);
  317. trace_seq_printf(s, "ID: %d\n", call->id);
  318. trace_seq_printf(s, "format:\n");
  319. trace_write_header(s);
  320. r = call->show_format(s);
  321. if (!r) {
  322. /*
  323. * ug! The format output is bigger than a PAGE!!
  324. */
  325. buf = "FORMAT TOO BIG\n";
  326. r = simple_read_from_buffer(ubuf, cnt, ppos,
  327. buf, strlen(buf));
  328. goto out;
  329. }
  330. r = simple_read_from_buffer(ubuf, cnt, ppos,
  331. s->buffer, s->len);
  332. out:
  333. kfree(s);
  334. return r;
  335. }
/* Iterator for "available_events": every event with a regfunc. */
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

/* Iterator for "set_event": only the currently-enabled events. */
static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};
/* "available_events": read-only listing of all registerable events. */
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* "set_event": read the enabled set, write specs to change it. */
static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

/* Per-event "enable" toggle file. */
static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

/* Per-event read-only "format" description file. */
static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};
  370. static struct dentry *event_trace_events_dir(void)
  371. {
  372. static struct dentry *d_tracer;
  373. static struct dentry *d_events;
  374. if (d_events)
  375. return d_events;
  376. d_tracer = tracing_init_dentry();
  377. if (!d_tracer)
  378. return NULL;
  379. d_events = debugfs_create_dir("events", d_tracer);
  380. if (!d_events)
  381. pr_warning("Could not create debugfs "
  382. "'events' directory\n");
  383. return d_events;
  384. }
/* One debugfs directory per trace subsystem. */
struct event_subsystem {
	struct list_head list;	/* link in event_subsystems below */
	const char *name;	/* subsystem name; pointer is borrowed, never freed here */
	struct dentry *entry;	/* its debugfs directory */
};

/* All subsystems that have had a directory created so far. */
static LIST_HEAD(event_subsystems);
  391. static struct dentry *
  392. event_subsystem_dir(const char *name, struct dentry *d_events)
  393. {
  394. struct event_subsystem *system;
  395. /* First see if we did not already create this dir */
  396. list_for_each_entry(system, &event_subsystems, list) {
  397. if (strcmp(system->name, name) == 0)
  398. return system->entry;
  399. }
  400. /* need to create new entry */
  401. system = kmalloc(sizeof(*system), GFP_KERNEL);
  402. if (!system) {
  403. pr_warning("No memory to create event subsystem %s\n",
  404. name);
  405. return d_events;
  406. }
  407. system->entry = debugfs_create_dir(name, d_events);
  408. if (!system->entry) {
  409. pr_warning("Could not create event subsystem %s\n",
  410. name);
  411. kfree(system);
  412. return d_events;
  413. }
  414. system->name = name;
  415. list_add(&system->list, &event_subsystems);
  416. return system->entry;
  417. }
  418. static int
  419. event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
  420. {
  421. struct dentry *entry;
  422. int ret;
  423. /*
  424. * If the trace point header did not define TRACE_SYSTEM
  425. * then the system would be called "TRACE_SYSTEM".
  426. */
  427. if (strcmp(call->system, "TRACE_SYSTEM") != 0)
  428. d_events = event_subsystem_dir(call->system, d_events);
  429. if (call->raw_init) {
  430. ret = call->raw_init();
  431. if (ret < 0) {
  432. pr_warning("Could not initialize trace point"
  433. " events/%s\n", call->name);
  434. return ret;
  435. }
  436. }
  437. call->dir = debugfs_create_dir(call->name, d_events);
  438. if (!call->dir) {
  439. pr_warning("Could not create debugfs "
  440. "'%s' directory\n", call->name);
  441. return -1;
  442. }
  443. if (call->regfunc) {
  444. entry = debugfs_create_file("enable", 0644, call->dir, call,
  445. &ftrace_enable_fops);
  446. if (!entry)
  447. pr_warning("Could not create debugfs "
  448. "'%s/enable' entry\n", call->name);
  449. }
  450. /* A trace may not want to export its format */
  451. if (!call->show_format)
  452. return 0;
  453. entry = debugfs_create_file("format", 0444, call->dir, call,
  454. &ftrace_event_format_fops);
  455. if (!entry)
  456. pr_warning("Could not create debugfs "
  457. "'%s/format' entry\n", call->name);
  458. return 0;
  459. }
  460. static __init int event_trace_init(void)
  461. {
  462. struct ftrace_event_call *call = __start_ftrace_events;
  463. struct dentry *d_tracer;
  464. struct dentry *entry;
  465. struct dentry *d_events;
  466. d_tracer = tracing_init_dentry();
  467. if (!d_tracer)
  468. return 0;
  469. entry = debugfs_create_file("available_events", 0444, d_tracer,
  470. (void *)&show_event_seq_ops,
  471. &ftrace_avail_fops);
  472. if (!entry)
  473. pr_warning("Could not create debugfs "
  474. "'available_events' entry\n");
  475. entry = debugfs_create_file("set_event", 0644, d_tracer,
  476. (void *)&show_set_event_seq_ops,
  477. &ftrace_set_event_fops);
  478. if (!entry)
  479. pr_warning("Could not create debugfs "
  480. "'set_event' entry\n");
  481. d_events = event_trace_events_dir();
  482. if (!d_events)
  483. return 0;
  484. events_for_each(call) {
  485. /* The linker may leave blanks */
  486. if (!call->name)
  487. continue;
  488. event_create_dir(call, d_events);
  489. }
  490. return 0;
  491. }
  492. fs_initcall(event_trace_init);