/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
  18. int trace_define_field(struct ftrace_event_call *call, char *type,
  19. char *name, int offset, int size)
  20. {
  21. struct ftrace_event_field *field;
  22. field = kzalloc(sizeof(*field), GFP_KERNEL);
  23. if (!field)
  24. goto err;
  25. field->name = kstrdup(name, GFP_KERNEL);
  26. if (!field->name)
  27. goto err;
  28. field->type = kstrdup(type, GFP_KERNEL);
  29. if (!field->type)
  30. goto err;
  31. field->offset = offset;
  32. field->size = size;
  33. list_add(&field->link, &call->fields);
  34. return 0;
  35. err:
  36. if (field) {
  37. kfree(field->name);
  38. kfree(field->type);
  39. }
  40. kfree(field);
  41. return -ENOMEM;
  42. }
  43. EXPORT_SYMBOL_GPL(trace_define_field);
  44. static void ftrace_clear_events(void)
  45. {
  46. struct ftrace_event_call *call;
  47. list_for_each_entry(call, &ftrace_events, list) {
  48. if (call->enabled) {
  49. call->enabled = 0;
  50. call->unregfunc();
  51. }
  52. }
  53. }
  54. static void ftrace_event_enable_disable(struct ftrace_event_call *call,
  55. int enable)
  56. {
  57. switch (enable) {
  58. case 0:
  59. if (call->enabled) {
  60. call->enabled = 0;
  61. call->unregfunc();
  62. }
  63. break;
  64. case 1:
  65. if (!call->enabled) {
  66. call->enabled = 1;
  67. call->regfunc();
  68. }
  69. break;
  70. }
  71. }
  72. static int ftrace_set_clr_event(char *buf, int set)
  73. {
  74. struct ftrace_event_call *call;
  75. char *event = NULL, *sub = NULL, *match;
  76. int ret = -EINVAL;
  77. /*
  78. * The buf format can be <subsystem>:<event-name>
  79. * *:<event-name> means any event by that name.
  80. * :<event-name> is the same.
  81. *
  82. * <subsystem>:* means all events in that subsystem
  83. * <subsystem>: means the same.
  84. *
  85. * <name> (no ':') means all events in a subsystem with
  86. * the name <name> or any event that matches <name>
  87. */
  88. match = strsep(&buf, ":");
  89. if (buf) {
  90. sub = match;
  91. event = buf;
  92. match = NULL;
  93. if (!strlen(sub) || strcmp(sub, "*") == 0)
  94. sub = NULL;
  95. if (!strlen(event) || strcmp(event, "*") == 0)
  96. event = NULL;
  97. }
  98. mutex_lock(&event_mutex);
  99. list_for_each_entry(call, &ftrace_events, list) {
  100. if (!call->name || !call->regfunc)
  101. continue;
  102. if (match &&
  103. strcmp(match, call->name) != 0 &&
  104. strcmp(match, call->system) != 0)
  105. continue;
  106. if (sub && strcmp(sub, call->system) != 0)
  107. continue;
  108. if (event && strcmp(event, call->name) != 0)
  109. continue;
  110. ftrace_event_enable_disable(call, set);
  111. ret = 0;
  112. }
  113. mutex_unlock(&event_mutex);
  114. return ret;
  115. }
  116. /* 128 should be much more than enough */
  117. #define EVENT_BUF_SIZE 127
  118. static ssize_t
  119. ftrace_event_write(struct file *file, const char __user *ubuf,
  120. size_t cnt, loff_t *ppos)
  121. {
  122. size_t read = 0;
  123. int i, set = 1;
  124. ssize_t ret;
  125. char *buf;
  126. char ch;
  127. if (!cnt || cnt < 0)
  128. return 0;
  129. ret = tracing_update_buffers();
  130. if (ret < 0)
  131. return ret;
  132. ret = get_user(ch, ubuf++);
  133. if (ret)
  134. return ret;
  135. read++;
  136. cnt--;
  137. /* skip white space */
  138. while (cnt && isspace(ch)) {
  139. ret = get_user(ch, ubuf++);
  140. if (ret)
  141. return ret;
  142. read++;
  143. cnt--;
  144. }
  145. /* Only white space found? */
  146. if (isspace(ch)) {
  147. file->f_pos += read;
  148. ret = read;
  149. return ret;
  150. }
  151. buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
  152. if (!buf)
  153. return -ENOMEM;
  154. if (cnt > EVENT_BUF_SIZE)
  155. cnt = EVENT_BUF_SIZE;
  156. i = 0;
  157. while (cnt && !isspace(ch)) {
  158. if (!i && ch == '!')
  159. set = 0;
  160. else
  161. buf[i++] = ch;
  162. ret = get_user(ch, ubuf++);
  163. if (ret)
  164. goto out_free;
  165. read++;
  166. cnt--;
  167. }
  168. buf[i] = 0;
  169. file->f_pos += read;
  170. ret = ftrace_set_clr_event(buf, set);
  171. if (ret)
  172. goto out_free;
  173. ret = read;
  174. out_free:
  175. kfree(buf);
  176. return ret;
  177. }
  178. static void *
  179. t_next(struct seq_file *m, void *v, loff_t *pos)
  180. {
  181. struct list_head *list = m->private;
  182. struct ftrace_event_call *call;
  183. (*pos)++;
  184. for (;;) {
  185. if (list == &ftrace_events)
  186. return NULL;
  187. call = list_entry(list, struct ftrace_event_call, list);
  188. /*
  189. * The ftrace subsystem is for showing formats only.
  190. * They can not be enabled or disabled via the event files.
  191. */
  192. if (call->regfunc)
  193. break;
  194. list = list->next;
  195. }
  196. m->private = list->next;
  197. return call;
  198. }
  199. static void *t_start(struct seq_file *m, loff_t *pos)
  200. {
  201. return t_next(m, NULL, pos);
  202. }
  203. static void *
  204. s_next(struct seq_file *m, void *v, loff_t *pos)
  205. {
  206. struct list_head *list = m->private;
  207. struct ftrace_event_call *call;
  208. (*pos)++;
  209. retry:
  210. if (list == &ftrace_events)
  211. return NULL;
  212. call = list_entry(list, struct ftrace_event_call, list);
  213. if (!call->enabled) {
  214. list = list->next;
  215. goto retry;
  216. }
  217. m->private = list->next;
  218. return call;
  219. }
  220. static void *s_start(struct seq_file *m, loff_t *pos)
  221. {
  222. return s_next(m, NULL, pos);
  223. }
  224. static int t_show(struct seq_file *m, void *v)
  225. {
  226. struct ftrace_event_call *call = v;
  227. if (strcmp(call->system, TRACE_SYSTEM) != 0)
  228. seq_printf(m, "%s:", call->system);
  229. seq_printf(m, "%s\n", call->name);
  230. return 0;
  231. }
  232. static void t_stop(struct seq_file *m, void *p)
  233. {
  234. }
  235. static int
  236. ftrace_event_seq_open(struct inode *inode, struct file *file)
  237. {
  238. int ret;
  239. const struct seq_operations *seq_ops;
  240. if ((file->f_mode & FMODE_WRITE) &&
  241. !(file->f_flags & O_APPEND))
  242. ftrace_clear_events();
  243. seq_ops = inode->i_private;
  244. ret = seq_open(file, seq_ops);
  245. if (!ret) {
  246. struct seq_file *m = file->private_data;
  247. m->private = ftrace_events.next;
  248. }
  249. return ret;
  250. }
  251. static ssize_t
  252. event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
  253. loff_t *ppos)
  254. {
  255. struct ftrace_event_call *call = filp->private_data;
  256. char *buf;
  257. if (call->enabled)
  258. buf = "1\n";
  259. else
  260. buf = "0\n";
  261. return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
  262. }
  263. static ssize_t
  264. event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
  265. loff_t *ppos)
  266. {
  267. struct ftrace_event_call *call = filp->private_data;
  268. char buf[64];
  269. unsigned long val;
  270. int ret;
  271. if (cnt >= sizeof(buf))
  272. return -EINVAL;
  273. if (copy_from_user(&buf, ubuf, cnt))
  274. return -EFAULT;
  275. buf[cnt] = 0;
  276. ret = strict_strtoul(buf, 10, &val);
  277. if (ret < 0)
  278. return ret;
  279. ret = tracing_update_buffers();
  280. if (ret < 0)
  281. return ret;
  282. switch (val) {
  283. case 0:
  284. case 1:
  285. mutex_lock(&event_mutex);
  286. ftrace_event_enable_disable(call, val);
  287. mutex_unlock(&event_mutex);
  288. break;
  289. default:
  290. return -EINVAL;
  291. }
  292. *ppos += cnt;
  293. return cnt;
  294. }
  295. #undef FIELD
  296. #define FIELD(type, name) \
  297. #type, "common_" #name, offsetof(typeof(field), name), \
  298. sizeof(field.name)
  299. static int trace_write_header(struct trace_seq *s)
  300. {
  301. struct trace_entry field;
  302. /* struct trace_entry */
  303. return trace_seq_printf(s,
  304. "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
  305. "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
  306. "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
  307. "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
  308. "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
  309. "\n",
  310. FIELD(unsigned char, type),
  311. FIELD(unsigned char, flags),
  312. FIELD(unsigned char, preempt_count),
  313. FIELD(int, pid),
  314. FIELD(int, tgid));
  315. }
  316. static ssize_t
  317. event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
  318. loff_t *ppos)
  319. {
  320. struct ftrace_event_call *call = filp->private_data;
  321. struct trace_seq *s;
  322. char *buf;
  323. int r;
  324. if (*ppos)
  325. return 0;
  326. s = kmalloc(sizeof(*s), GFP_KERNEL);
  327. if (!s)
  328. return -ENOMEM;
  329. trace_seq_init(s);
  330. /* If any of the first writes fail, so will the show_format. */
  331. trace_seq_printf(s, "name: %s\n", call->name);
  332. trace_seq_printf(s, "ID: %d\n", call->id);
  333. trace_seq_printf(s, "format:\n");
  334. trace_write_header(s);
  335. r = call->show_format(s);
  336. if (!r) {
  337. /*
  338. * ug! The format output is bigger than a PAGE!!
  339. */
  340. buf = "FORMAT TOO BIG\n";
  341. r = simple_read_from_buffer(ubuf, cnt, ppos,
  342. buf, strlen(buf));
  343. goto out;
  344. }
  345. r = simple_read_from_buffer(ubuf, cnt, ppos,
  346. s->buffer, s->len);
  347. out:
  348. kfree(s);
  349. return r;
  350. }
  351. static ssize_t
  352. event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
  353. {
  354. struct ftrace_event_call *call = filp->private_data;
  355. struct trace_seq *s;
  356. int r;
  357. if (*ppos)
  358. return 0;
  359. s = kmalloc(sizeof(*s), GFP_KERNEL);
  360. if (!s)
  361. return -ENOMEM;
  362. trace_seq_init(s);
  363. trace_seq_printf(s, "%d\n", call->id);
  364. r = simple_read_from_buffer(ubuf, cnt, ppos,
  365. s->buffer, s->len);
  366. kfree(s);
  367. return r;
  368. }
  369. static ssize_t
  370. event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
  371. loff_t *ppos)
  372. {
  373. struct ftrace_event_call *call = filp->private_data;
  374. struct trace_seq *s;
  375. int r;
  376. if (*ppos)
  377. return 0;
  378. s = kmalloc(sizeof(*s), GFP_KERNEL);
  379. if (!s)
  380. return -ENOMEM;
  381. trace_seq_init(s);
  382. filter_print_preds(call->preds, call->n_preds, s);
  383. r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
  384. kfree(s);
  385. return r;
  386. }
  387. static ssize_t
  388. event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
  389. loff_t *ppos)
  390. {
  391. struct ftrace_event_call *call = filp->private_data;
  392. char buf[64], *pbuf = buf;
  393. struct filter_pred *pred;
  394. int err;
  395. if (cnt >= sizeof(buf))
  396. return -EINVAL;
  397. if (copy_from_user(&buf, ubuf, cnt))
  398. return -EFAULT;
  399. buf[cnt] = '\0';
  400. pred = kzalloc(sizeof(*pred), GFP_KERNEL);
  401. if (!pred)
  402. return -ENOMEM;
  403. err = filter_parse(&pbuf, pred);
  404. if (err < 0) {
  405. filter_free_pred(pred);
  406. return err;
  407. }
  408. if (pred->clear) {
  409. filter_disable_preds(call);
  410. filter_free_pred(pred);
  411. return cnt;
  412. }
  413. err = filter_add_pred(call, pred);
  414. if (err < 0) {
  415. filter_free_pred(pred);
  416. return err;
  417. }
  418. filter_free_pred(pred);
  419. *ppos += cnt;
  420. return cnt;
  421. }
  422. static ssize_t
  423. subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
  424. loff_t *ppos)
  425. {
  426. struct event_subsystem *system = filp->private_data;
  427. struct trace_seq *s;
  428. int r;
  429. if (*ppos)
  430. return 0;
  431. s = kmalloc(sizeof(*s), GFP_KERNEL);
  432. if (!s)
  433. return -ENOMEM;
  434. trace_seq_init(s);
  435. filter_print_preds(system->preds, system->n_preds, s);
  436. r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
  437. kfree(s);
  438. return r;
  439. }
  440. static ssize_t
  441. subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
  442. loff_t *ppos)
  443. {
  444. struct event_subsystem *system = filp->private_data;
  445. char buf[64], *pbuf = buf;
  446. struct filter_pred *pred;
  447. int err;
  448. if (cnt >= sizeof(buf))
  449. return -EINVAL;
  450. if (copy_from_user(&buf, ubuf, cnt))
  451. return -EFAULT;
  452. buf[cnt] = '\0';
  453. pred = kzalloc(sizeof(*pred), GFP_KERNEL);
  454. if (!pred)
  455. return -ENOMEM;
  456. err = filter_parse(&pbuf, pred);
  457. if (err < 0) {
  458. filter_free_pred(pred);
  459. return err;
  460. }
  461. if (pred->clear) {
  462. filter_free_subsystem_preds(system);
  463. filter_free_pred(pred);
  464. return cnt;
  465. }
  466. err = filter_add_subsystem_pred(system, pred);
  467. if (err < 0) {
  468. filter_free_subsystem_preds(system);
  469. filter_free_pred(pred);
  470. return err;
  471. }
  472. *ppos += cnt;
  473. return cnt;
  474. }
  475. static const struct seq_operations show_event_seq_ops = {
  476. .start = t_start,
  477. .next = t_next,
  478. .show = t_show,
  479. .stop = t_stop,
  480. };
  481. static const struct seq_operations show_set_event_seq_ops = {
  482. .start = s_start,
  483. .next = s_next,
  484. .show = t_show,
  485. .stop = t_stop,
  486. };
  487. static const struct file_operations ftrace_avail_fops = {
  488. .open = ftrace_event_seq_open,
  489. .read = seq_read,
  490. .llseek = seq_lseek,
  491. .release = seq_release,
  492. };
  493. static const struct file_operations ftrace_set_event_fops = {
  494. .open = ftrace_event_seq_open,
  495. .read = seq_read,
  496. .write = ftrace_event_write,
  497. .llseek = seq_lseek,
  498. .release = seq_release,
  499. };
  500. static const struct file_operations ftrace_enable_fops = {
  501. .open = tracing_open_generic,
  502. .read = event_enable_read,
  503. .write = event_enable_write,
  504. };
  505. static const struct file_operations ftrace_event_format_fops = {
  506. .open = tracing_open_generic,
  507. .read = event_format_read,
  508. };
  509. static const struct file_operations ftrace_event_id_fops = {
  510. .open = tracing_open_generic,
  511. .read = event_id_read,
  512. };
  513. static const struct file_operations ftrace_event_filter_fops = {
  514. .open = tracing_open_generic,
  515. .read = event_filter_read,
  516. .write = event_filter_write,
  517. };
  518. static const struct file_operations ftrace_subsystem_filter_fops = {
  519. .open = tracing_open_generic,
  520. .read = subsystem_filter_read,
  521. .write = subsystem_filter_write,
  522. };
  523. static struct dentry *event_trace_events_dir(void)
  524. {
  525. static struct dentry *d_tracer;
  526. static struct dentry *d_events;
  527. if (d_events)
  528. return d_events;
  529. d_tracer = tracing_init_dentry();
  530. if (!d_tracer)
  531. return NULL;
  532. d_events = debugfs_create_dir("events", d_tracer);
  533. if (!d_events)
  534. pr_warning("Could not create debugfs "
  535. "'events' directory\n");
  536. return d_events;
  537. }
  538. static LIST_HEAD(event_subsystems);
  539. static struct dentry *
  540. event_subsystem_dir(const char *name, struct dentry *d_events)
  541. {
  542. struct event_subsystem *system;
  543. struct dentry *entry;
  544. /* First see if we did not already create this dir */
  545. list_for_each_entry(system, &event_subsystems, list) {
  546. if (strcmp(system->name, name) == 0)
  547. return system->entry;
  548. }
  549. /* need to create new entry */
  550. system = kmalloc(sizeof(*system), GFP_KERNEL);
  551. if (!system) {
  552. pr_warning("No memory to create event subsystem %s\n",
  553. name);
  554. return d_events;
  555. }
  556. system->entry = debugfs_create_dir(name, d_events);
  557. if (!system->entry) {
  558. pr_warning("Could not create event subsystem %s\n",
  559. name);
  560. kfree(system);
  561. return d_events;
  562. }
  563. system->name = name;
  564. list_add(&system->list, &event_subsystems);
  565. system->preds = NULL;
  566. system->n_preds = 0;
  567. entry = debugfs_create_file("filter", 0644, system->entry, system,
  568. &ftrace_subsystem_filter_fops);
  569. if (!entry)
  570. pr_warning("Could not create debugfs "
  571. "'%s/filter' entry\n", name);
  572. return system->entry;
  573. }
  574. static int
  575. event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
  576. {
  577. struct dentry *entry;
  578. int ret;
  579. /*
  580. * If the trace point header did not define TRACE_SYSTEM
  581. * then the system would be called "TRACE_SYSTEM".
  582. */
  583. if (strcmp(call->system, "TRACE_SYSTEM") != 0)
  584. d_events = event_subsystem_dir(call->system, d_events);
  585. if (call->raw_init) {
  586. ret = call->raw_init();
  587. if (ret < 0) {
  588. pr_warning("Could not initialize trace point"
  589. " events/%s\n", call->name);
  590. return ret;
  591. }
  592. }
  593. call->dir = debugfs_create_dir(call->name, d_events);
  594. if (!call->dir) {
  595. pr_warning("Could not create debugfs "
  596. "'%s' directory\n", call->name);
  597. return -1;
  598. }
  599. if (call->regfunc) {
  600. entry = debugfs_create_file("enable", 0644, call->dir, call,
  601. &ftrace_enable_fops);
  602. if (!entry)
  603. pr_warning("Could not create debugfs "
  604. "'%s/enable' entry\n", call->name);
  605. }
  606. if (call->id) {
  607. entry = debugfs_create_file("id", 0444, call->dir, call,
  608. &ftrace_event_id_fops);
  609. if (!entry)
  610. pr_warning("Could not create debugfs '%s/id' entry\n",
  611. call->name);
  612. }
  613. if (call->define_fields) {
  614. ret = call->define_fields();
  615. if (ret < 0) {
  616. pr_warning("Could not initialize trace point"
  617. " events/%s\n", call->name);
  618. return ret;
  619. }
  620. entry = debugfs_create_file("filter", 0644, call->dir, call,
  621. &ftrace_event_filter_fops);
  622. if (!entry)
  623. pr_warning("Could not create debugfs "
  624. "'%s/filter' entry\n", call->name);
  625. }
  626. /* A trace may not want to export its format */
  627. if (!call->show_format)
  628. return 0;
  629. entry = debugfs_create_file("format", 0444, call->dir, call,
  630. &ftrace_event_format_fops);
  631. if (!entry)
  632. pr_warning("Could not create debugfs "
  633. "'%s/format' entry\n", call->name);
  634. return 0;
  635. }
  636. extern struct ftrace_event_call __start_ftrace_events[];
  637. extern struct ftrace_event_call __stop_ftrace_events[];
  638. #define for_each_event(event) \
  639. for (event = __start_ftrace_events; \
  640. (unsigned long)event < (unsigned long)__stop_ftrace_events; \
  641. event++)
  642. static __init int event_trace_init(void)
  643. {
  644. struct ftrace_event_call *call;
  645. struct dentry *d_tracer;
  646. struct dentry *entry;
  647. struct dentry *d_events;
  648. d_tracer = tracing_init_dentry();
  649. if (!d_tracer)
  650. return 0;
  651. entry = debugfs_create_file("available_events", 0444, d_tracer,
  652. (void *)&show_event_seq_ops,
  653. &ftrace_avail_fops);
  654. if (!entry)
  655. pr_warning("Could not create debugfs "
  656. "'available_events' entry\n");
  657. entry = debugfs_create_file("set_event", 0644, d_tracer,
  658. (void *)&show_set_event_seq_ops,
  659. &ftrace_set_event_fops);
  660. if (!entry)
  661. pr_warning("Could not create debugfs "
  662. "'set_event' entry\n");
  663. d_events = event_trace_events_dir();
  664. if (!d_events)
  665. return 0;
  666. for_each_event(call) {
  667. /* The linker may leave blanks */
  668. if (!call->name)
  669. continue;
  670. list_add(&call->list, &ftrace_events);
  671. event_create_dir(call, d_events);
  672. }
  673. return 0;
  674. }
  675. fs_initcall(event_trace_init);