/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"
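/*
 * event_mutex serializes enabled-state changes: it is held around
 * ftrace_event_enable_disable() in both the set_event write path
 * (ftrace_set_clr_event) and the per-event "enable" file write path
 * (event_enable_write).
 */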
static DEFINE_MUTEX(event_mutex);
int trace_define_field(struct ftrace_event_call *call, char *type,
                       char *name, int offset, int size)
{
        struct ftrace_event_field *field;

        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;

        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;

        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;

        field->offset = offset;
        field->size = size;
        list_add(&field->link, &call->fields);

        return 0;

err:
        if (field) {
                kfree(field->name);
                kfree(field->type);
        }
        kfree(field);

        return -ENOMEM;
}
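/*
 * A minimal sketch of a caller, assuming an event whose ring-buffer entry
 * carries a single int payload; the struct, variable and event names here
 * are illustrative only, not defined in this file:
 *
 *	struct my_event_entry {
 *		struct trace_entry	ent;
 *		int			value;
 *	};
 *
 *	static int my_event_define_fields(void)
 *	{
 *		return trace_define_field(&event_my_event, "int", "value",
 *					  offsetof(struct my_event_entry, value),
 *					  sizeof(int));
 *	}
 */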
static void ftrace_clear_events(void)
{
        struct ftrace_event_call *call = (void *)__start_ftrace_events;

        while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {

                if (call->enabled) {
                        call->enabled = 0;
                        call->unregfunc();
                }
                call++;
        }
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
                                        int enable)
{
        switch (enable) {
        case 0:
                if (call->enabled) {
                        call->enabled = 0;
                        call->unregfunc();
                }
                break;
        case 1:
                if (!call->enabled) {
                        call->enabled = 1;
                        call->regfunc();
                }
                break;
        }
}
static int ftrace_set_clr_event(char *buf, int set)
{
        struct ftrace_event_call *call = __start_ftrace_events;
        char *event = NULL, *sub = NULL, *match;
        int ret = -EINVAL;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */
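
        /*
         * For example (the subsystem and event names below are purely
         * illustrative, not guaranteed to exist in any given build):
         *
         *	"sched:sched_switch"	one named event in one subsystem
         *	"sched:"		every event in the "sched" subsystem
         *	"sched_switch"		any subsystem or event matching the name
         */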
        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        mutex_lock(&event_mutex);
        for_each_event(call) {

                if (!call->name || !call->regfunc)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(call, set);

                ret = 0;
        }
        mutex_unlock(&event_mutex);

        return ret;
}
/* 127 chars for the event token, plus a terminating NUL: much more than enough */
#define EVENT_BUF_SIZE		127
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        size_t read = 0;
        int i, set = 1;
        ssize_t ret;
        char *buf;
        char ch;

        /* cnt is a size_t and can never be negative */
        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        ret = get_user(ch, ubuf++);
        if (ret)
                return ret;
        read++;
        cnt--;

        /* skip white space */
        while (cnt && isspace(ch)) {
                ret = get_user(ch, ubuf++);
                if (ret)
                        return ret;
                read++;
                cnt--;
        }

        /* Only white space found? */
        if (isspace(ch)) {
                /* update *ppos; the VFS writes the file position back itself */
                *ppos += read;
                ret = read;
                return ret;
        }

        buf = kmalloc(EVENT_BUF_SIZE + 1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (cnt > EVENT_BUF_SIZE)
                cnt = EVENT_BUF_SIZE;

        i = 0;
        while (cnt && !isspace(ch)) {
                /* a leading '!' means "disable" instead of "enable" */
                if (!i && ch == '!')
                        set = 0;
                else
                        buf[i++] = ch;

                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out_free;
                read++;
                cnt--;
        }
        buf[i] = 0;

        *ppos += read;

        ret = ftrace_set_clr_event(buf, set);
        if (ret)
                goto out_free;

        ret = read;

 out_free:
        kfree(buf);

        return ret;
}
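/*
 * From the shell this parsing gives (a hedged sketch; the path assumes
 * debugfs is mounted at /sys/kernel/debug and the names are illustrative):
 *
 *	# echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	# echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * Note that opening set_event for writing without O_APPEND (plain '>')
 * already clears every event via ftrace_clear_events() in
 * ftrace_event_seq_open() below, so '!' mainly matters with '>>'.
 */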
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = m->private;

        (*pos)++;

        for (;;) {
                if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
                        return NULL;

                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->regfunc)
                        break;

                call++;
        }

        m->private = call + 1;

        return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = m->private;

        (*pos)++;

 retry:
        if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
                return NULL;

        if (!call->enabled) {
                call++;
                goto retry;
        }

        m->private = call + 1;

        return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        return s_next(m, NULL, pos);
}
static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = v;

        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
        int ret;
        const struct seq_operations *seq_ops;

        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_clear_events();

        seq_ops = inode->i_private;
        ret = seq_open(file, seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = __start_ftrace_events;
        }
        return ret;
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;

        if (call->enabled)
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                mutex_lock(&event_mutex);
                ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return cnt;
}
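/*
 * From the shell the enable file behaves like the sketch below (the
 * debugfs mount point and the event names are assumptions about the
 * running setup, not guarantees made by this file):
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo 1 > events/<subsystem>/<event>/enable
 *	# cat events/<subsystem>/<event>/enable
 *	1
 */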
#undef FIELD
#define FIELD(type, name)						\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
        struct trace_entry field;

        /* struct trace_entry */
        return trace_seq_printf(s,
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\n",
                                FIELD(unsigned char, type),
                                FIELD(unsigned char, flags),
                                FIELD(unsigned char, preempt_count),
                                FIELD(int, pid),
                                FIELD(int, tgid));
}
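/*
 * Each FIELD() expands to the type string, a "common_"-prefixed name and
 * the member's offsetof()/sizeof(), so the common header reads roughly as
 * below; the exact offsets and sizes depend on struct trace_entry's layout:
 *
 *	field:unsigned char common_type;	offset:0;	size:1;
 *	field:unsigned char common_flags;	offset:1;	size:1;
 *	...
 */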
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        char *buf;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        /* If any of the first writes fail, so will the show_format. */
        trace_seq_printf(s, "name: %s\n", call->name);
        trace_seq_printf(s, "ID: %d\n", call->id);
        trace_seq_printf(s, "format:\n");
        trace_write_header(s);

        r = call->show_format(s);
        if (!r) {
                /*
                 * ug! The format output is bigger than a PAGE!!
                 */
                buf = "FORMAT TOO BIG\n";
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                            buf, strlen(buf));
                goto out;
        }
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
 out:
        kfree(s);
        return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", call->id);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
        kfree(s);
        return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        filter_print_preds(call->preds, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char buf[64], *pbuf = buf;
        struct filter_pred *pred;
        int err;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
        buf[cnt] = '\0';

        pred = kzalloc(sizeof(*pred), GFP_KERNEL);
        if (!pred)
                return -ENOMEM;

        err = filter_parse(&pbuf, pred);
        if (err < 0) {
                filter_free_pred(pred);
                return err;
        }

        if (pred->clear) {
                filter_free_preds(call);
                filter_free_pred(pred);
                return cnt;
        }

        err = filter_add_pred(call, pred);
        if (err < 0) {
                filter_free_pred(pred);
                return err;
        }

        *ppos += cnt;

        return cnt;
}
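/*
 * The predicate grammar itself lives in filter_parse() (not in this file).
 * A hedged sketch of typical usage, assuming the "<field> <op> <value>"
 * form and that filter_parse() reports a clear request via pred->clear:
 *
 *	# echo 'pid == 1' > events/<subsystem>/<event>/filter
 *	# echo 0 > events/<subsystem>/<event>/filter	(clears all preds)
 */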
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        filter_print_preds(system->preds, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        char buf[64], *pbuf = buf;
        struct filter_pred *pred;
        int err;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;
        buf[cnt] = '\0';

        pred = kzalloc(sizeof(*pred), GFP_KERNEL);
        if (!pred)
                return -ENOMEM;

        err = filter_parse(&pbuf, pred);
        if (err < 0) {
                filter_free_pred(pred);
                return err;
        }

        if (pred->clear) {
                filter_free_subsystem_preds(system);
                filter_free_pred(pred);
                return cnt;
        }

        err = filter_add_subsystem_pred(system, pred);
        if (err < 0) {
                filter_free_subsystem_preds(system);
                filter_free_pred(pred);
                return err;
        }

        *ppos += cnt;

        return cnt;
}
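/*
 * Two iterators share t_show() for output: show_event_seq_ops walks every
 * event that has a regfunc and backs "available_events", while
 * show_set_event_seq_ops walks only currently enabled events and backs
 * "set_event" (see event_trace_init() below).
 */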
static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = tracing_open_generic,
        .read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
        .open = tracing_open_generic,
        .read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = tracing_open_generic,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
};
static struct dentry *event_trace_events_dir(void)
{
        static struct dentry *d_tracer;
        static struct dentry *d_events;

        if (d_events)
                return d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return NULL;

        d_events = debugfs_create_dir("events", d_tracer);
        if (!d_events)
                pr_warning("Could not create debugfs "
                           "'events' directory\n");

        return d_events;
}
static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
        struct event_subsystem *system;

        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0)
                        return system->entry;
        }

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system) {
                pr_warning("No memory to create event subsystem %s\n",
                           name);
                return d_events;
        }

        system->entry = debugfs_create_dir(name, d_events);
        if (!system->entry) {
                pr_warning("Could not create event subsystem %s\n",
                           name);
                kfree(system);
                return d_events;
        }

        system->name = name;
        list_add(&system->list, &event_subsystems);

        system->preds = NULL;

        return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
{
        struct dentry *entry;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->system, "TRACE_SYSTEM") != 0)
                d_events = event_subsystem_dir(call->system, d_events);

        if (call->raw_init) {
                ret = call->raw_init();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
        }

        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs "
                           "'%s' directory\n", call->name);
                return -1;
        }

        if (call->regfunc) {
                entry = debugfs_create_file("enable", 0644, call->dir, call,
                                            &ftrace_enable_fops);
                if (!entry)
                        pr_warning("Could not create debugfs "
                                   "'%s/enable' entry\n", call->name);
        }

        if (call->id) {
                entry = debugfs_create_file("id", 0444, call->dir, call,
                                            &ftrace_event_id_fops);
                if (!entry)
                        pr_warning("Could not create debugfs '%s/id' entry\n",
                                   call->name);
        }

        if (call->define_fields) {
                ret = call->define_fields();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
                entry = debugfs_create_file("filter", 0644, call->dir, call,
                                            &ftrace_event_filter_fops);
                if (!entry)
                        pr_warning("Could not create debugfs "
                                   "'%s/filter' entry\n", call->name);
        }

        /* A trace may not want to export its format */
        if (!call->show_format)
                return 0;

        entry = debugfs_create_file("format", 0444, call->dir, call,
                                    &ftrace_event_format_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'%s/format' entry\n", call->name);

        return 0;
}
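/*
 * Taken together, the files created above give the following debugfs
 * layout (assuming debugfs is mounted at /sys/kernel/debug; each per-event
 * file exists only when the event provides the corresponding callback):
 *
 *	tracing/available_events
 *	tracing/set_event
 *	tracing/events/<subsystem>/<event>/enable
 *	tracing/events/<subsystem>/<event>/id
 *	tracing/events/<subsystem>/<event>/filter
 *	tracing/events/<subsystem>/<event>/format
 */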
static __init int event_trace_init(void)
{
        struct ftrace_event_call *call = __start_ftrace_events;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    (void *)&show_event_seq_ops,
                                    &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_events' entry\n");

        entry = debugfs_create_file("set_event", 0644, d_tracer,
                                    (void *)&show_set_event_seq_ops,
                                    &ftrace_set_event_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_event' entry\n");

        d_events = event_trace_events_dir();
        if (!d_events)
                return 0;

        for_each_event(call) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;

                event_create_dir(call, d_events);
        }

        return 0;
}
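/*
 * Registered at fs_initcall level: debugfs itself is set up earlier in
 * boot (at core_initcall time), so the debugfs_create_* calls above are
 * safe by the time this runs.
 */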
fs_initcall(event_trace_init);