trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
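
/*
 * A minimal sketch of how an event's define_fields callback might use the
 * helper above. The event name, entry struct and "pid" field here are
 * hypothetical, for illustration only:
 *
 *	static int my_event_define_fields(void)
 *	{
 *		struct my_event_entry field;	// hypothetical entry struct
 *
 *		return trace_define_field(&event_my_event, "pid_t", "pid",
 *					  offsetof(typeof(field), pid),
 *					  sizeof(field.pid));
 *	}
 */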
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
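
	/*
	 * For example (the subsystem and event names below are just
	 * illustrations):
	 *
	 *	sched:sched_switch	- the sched_switch event in sched
	 *	sched:			- every event in the sched subsystem
	 *	sched_switch		- any subsystem or event named
	 *				  sched_switch
	 */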
	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt)	/* cnt is a size_t, so it can never be negative */
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	int ret;
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	ret = seq_open(file, seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = ftrace_events.next;
	}
	return ret;
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
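
/*
 * From user space, an event is typically toggled by writing "1" or "0" to
 * its per-event enable file created in event_create_dir() below, e.g.
 * (path is illustrative, relative to the debugfs tracing directory):
 *
 *	echo 1 > events/<subsystem>/<event>/enable
 *	echo 0 > events/<subsystem>/<event>/enable
 */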
#undef FIELD
#define FIELD(type, name)						\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned char, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
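
/*
 * The header written above shows up in the event's "format" file as one
 * line per common field, in this shape (the exact offsets and sizes
 * depend on the actual struct trace_entry layout):
 *
 *	field:unsigned char type;	offset:0;	size:1;
 *	field:unsigned char flags;	offset:1;	size:1;
 *	...
 */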
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	filter_print_preds(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_disable_preds(call);
		filter_free_pred(pred);
		return cnt;
	}

	err = filter_add_pred(call, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	filter_free_pred(pred);

	*ppos += cnt;

	return cnt;
}
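
/*
 * A sketch of how the filter file is used from user space. The field name
 * and value are illustrative; the accepted predicate syntax is whatever
 * filter_parse() implements, and clearing via a "0" write assumes
 * filter_parse() sets pred->clear for that input:
 *
 *	echo 'common_pid == 0' > events/<subsystem>/<event>/filter
 *	echo 0 > events/<subsystem>/<event>/filter
 */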
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	filter_print_subsystem_preds(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char buf[64], *pbuf = buf;
	struct filter_pred *pred;
	int err;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	pred = kzalloc(sizeof(*pred), GFP_KERNEL);
	if (!pred)
		return -ENOMEM;

	err = filter_parse(&pbuf, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	if (pred->clear) {
		filter_free_subsystem_preds(system);
		filter_free_pred(pred);
		return cnt;
	}

	err = filter_add_subsystem_pred(system, pred);
	if (err < 0) {
		filter_free_pred(pred);
		return err;
	}

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->preds = NULL;
	system->n_preds = 0;

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  &ftrace_enable_fops);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  &ftrace_event_id_fops);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  &ftrace_event_filter_fops);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  &ftrace_event_format_fops);

	return 0;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_event_call *call, *p;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
		}
	}
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
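
/*
 * These symbols bound the built-in ftrace event section; they are
 * provided by the architecture's linker script.
 */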
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)	/* register_module_notifier() returns 0 on success */
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	char *sysname;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		/* ftrace_set_clr_event can modify the name passed in. */
		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 1);
		kfree(sysname);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 0);
		kfree(sysname);

		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	sysname = kmalloc(4, GFP_KERNEL);
	if (WARN_ON(!sysname)) {
		pr_warning("Can't allocate memory, giving up!\n");
		return;
	}
	memcpy(sysname, "*:*", 4);

	ret = ftrace_set_clr_event(sysname, 1);
	if (WARN_ON_ONCE(ret)) {
		kfree(sysname);
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 0);
	kfree(sysname);

	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	event_trace_self_tests();
	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif