trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
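
/*
 * A minimal sketch of a caller, assuming a hypothetical event
 * "my_event" (the real define_fields callbacks are generated by the
 * TRACE_EVENT() machinery elsewhere; names below are examples only):
 *
 *	struct my_entry { struct trace_entry ent; int value; };
 *
 *	static int my_event_define_fields(void)
 *	{
 *		struct my_entry field;
 *
 *		return trace_define_field(&event_my_event, "int", "value",
 *					  offsetof(typeof(field), value),
 *					  sizeof(field.value));
 *	}
 */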
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call = (void *)__start_ftrace_events;

	while ((unsigned long)call < (unsigned long)__stop_ftrace_events) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		call++;
	}
}
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
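
	/*
	 * Illustrative shell usage against the set_event file (paths
	 * assume debugfs mounted at /sys/kernel/debug; the event names
	 * are examples only):
	 *
	 *	echo sched:sched_switch > set_event	# one event
	 *	echo 'irq:*' >> set_event		# a whole subsystem
	 *	echo '!irq:*' >> set_event		# '!' disables (set=0,
	 *						#  via ftrace_event_write)
	 */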
	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	for_each_event(call) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;
	/* cnt is a size_t, so it can never be negative */
	if (!cnt)
		return 0;
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next = call;

	(*pos)++;

	for (;;) {
		if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
			return NULL;

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		call++;
		next = call;
	}

	m->private = ++next;

	return call;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = m->private;
	struct ftrace_event_call *next;

	(*pos)++;

 retry:
	if ((unsigned long)call >= (unsigned long)__stop_ftrace_events)
		return NULL;

	if (!call->enabled) {
		call++;
		goto retry;
	}

	next = call;
	m->private = ++next;

	return call;
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = __start_ftrace_events;
	}
	return ret;
}
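
/*
 * Note the side effect above: opening set_event for writing without
 * O_APPEND ("echo ... > set_event" rather than ">>") first disables
 * every enabled event via ftrace_clear_events().
 */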
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
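
/*
 * Illustrative usage (the event path is an example; paths are relative
 * to the tracing debugfs directory, typically /sys/kernel/debug/tracing):
 *
 *	echo 1 > events/sched/sched_switch/enable	# calls regfunc()
 *	echo 0 > events/sched/sched_switch/enable	# calls unregfunc()
 *
 * Any value other than 0 or 1 is rejected with -EINVAL.
 */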
#undef FIELD
#define FIELD(type, name)						\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)
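
/*
 * For reference, FIELD(int, pid) expands to the four arguments
 *
 *	"int", "common_pid", offsetof(typeof(field), pid), sizeof(field.pid)
 *
 * i.e. one type/name/offset/size tuple per "field:" line below.
 */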
static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned char, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
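
/*
 * The header written above comes out roughly as below; the offset and
 * size values depend on the real layout of struct trace_entry, so the
 * numbers here are illustrative only:
 *
 *	field:unsigned char type;	offset:0;	size:1;
 *	field:unsigned char flags;	offset:1;	size:1;
 *	field:unsigned char preempt_count;	offset:2;	size:1;
 *	field:int pid;	offset:4;	size:4;
 *	field:int tgid;	offset:8;	size:4;
 */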
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */
	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_preds(call->preds, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	/* terminate the user string before handing it to the parser */
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_free_preds(call);
		filter_free_pred(pred);
		return cnt;
	}

	if (filter_add_pred(call, pred)) {
		filter_free_pred(pred);
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
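
/*
 * A sketch of the filter interface, assuming filter_parse() accepts
 * simple "<field> <op> <value>" predicates (the exact grammar lives in
 * filter_parse(); the field and event names below are examples only):
 *
 *	echo 'common_pid == 0' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	# sets pred->clear,
 *							#  dropping all preds
 */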
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	filter_print_preds(system->preds, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	/* terminate the user string before handing it to the parser */
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_free_subsystem_preds(system);
		filter_free_pred(pred);
		return cnt;
	}

	if (filter_add_subsystem_pred(system, pred)) {
		filter_free_subsystem_preds(system);
		filter_free_pred(pred);
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = name;
	list_add(&system->list, &event_subsystems);

	system->preds = NULL;

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, "TRACE_SYSTEM") != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc) {
		entry = debugfs_create_file("enable", 0644, call->dir, call,
					    &ftrace_enable_fops);
		if (!entry)
			pr_warning("Could not create debugfs "
				   "'%s/enable' entry\n", call->name);
	}

	if (call->id) {
		entry = debugfs_create_file("id", 0444, call->dir, call,
					    &ftrace_event_id_fops);
		if (!entry)
			pr_warning("Could not create debugfs '%s/id' entry\n",
				   call->name);
	}

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = debugfs_create_file("filter", 0644, call->dir, call,
					    &ftrace_event_filter_fops);
		if (!entry)
			pr_warning("Could not create debugfs "
				   "'%s/filter' entry\n", call->name);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = debugfs_create_file("format", 0444, call->dir, call,
				    &ftrace_event_format_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/format' entry\n", call->name);

	return 0;
}
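
/*
 * The resulting per-event debugfs layout, with each entry present only
 * when the corresponding callback/field exists on the event:
 *
 *	events/<system>/<event>/enable	(0644, needs regfunc)
 *	events/<system>/<event>/id	(0444, needs id)
 *	events/<system>/<event>/filter	(0644, needs define_fields)
 *	events/<system>/<event>/format	(0444, needs show_format)
 */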
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call = __start_ftrace_events;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	for_each_event(call) {

		/* The linker may leave blanks */
		if (!call->name)
			continue;

		event_create_dir(call, d_events);
	}

	return 0;
}

fs_initcall(event_trace_init);