trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

static DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
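
/*
 * Record one field of a trace event so the filter and format code can
 * find it later.  The name and type strings are duplicated because
 * callers may pass temporary buffers; on any allocation failure
 * everything allocated so far is freed and -ENOMEM is returned.
 */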
int trace_define_field(struct ftrace_event_call *call, char *type,
                       char *name, int offset, int size, int is_signed)
{
        struct ftrace_event_field *field;

        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;

        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;

        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;
        list_add(&field->link, &call->fields);

        return 0;

err:
        if (field) {
                kfree(field->name);
                kfree(field->type);
        }
        kfree(field);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
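
/*
 * Disable every event that is currently enabled.  Called when the
 * set_event file is opened for writing without O_APPEND, so the new
 * list starts from a clean slate.
 */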
static void ftrace_clear_events(void)
{
        struct ftrace_event_call *call;

        list_for_each_entry(call, &ftrace_events, list) {
                if (call->enabled) {
                        call->enabled = 0;
                        call->unregfunc();
                }
        }
}
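
/*
 * Register or unregister a single event's tracepoint probe.  The
 * enabled flag guards against double registration; both callers take
 * event_mutex around this call.
 */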
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
                                        int enable)
{
        switch (enable) {
        case 0:
                if (call->enabled) {
                        call->enabled = 0;
                        call->unregfunc();
                }
                break;
        case 1:
                if (!call->enabled) {
                        call->enabled = 1;
                        call->regfunc();
                }
                break;
        }
}
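
/*
 * Parse a "<subsystem>:<event>" string (see the format comment below)
 * and enable or disable every matching event.  Returns 0 if at least
 * one event matched, -EINVAL otherwise.
 */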
static int ftrace_set_clr_event(char *buf, int set)
{
        struct ftrace_event_call *call;
        char *event = NULL, *sub = NULL, *match;
        int ret = -EINVAL;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {

                if (!call->name || !call->regfunc)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(call, set);

                ret = 0;
        }
        mutex_unlock(&event_mutex);

        return ret;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127
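
/*
 * Handle writes to the set_event file.  Leading white space is
 * skipped, a leading '!' means "disable", and the rest of the word is
 * handed to ftrace_set_clr_event().
 */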
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        size_t read = 0;
        int i, set = 1;
        ssize_t ret;
        char *buf;
        char ch;

        if (!cnt || cnt < 0)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        ret = get_user(ch, ubuf++);
        if (ret)
                return ret;
        read++;
        cnt--;

        /* skip white space */
        while (cnt && isspace(ch)) {
                ret = get_user(ch, ubuf++);
                if (ret)
                        return ret;
                read++;
                cnt--;
        }

        /* Only white space found? */
        if (isspace(ch)) {
                file->f_pos += read;
                ret = read;
                return ret;
        }

        buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (cnt > EVENT_BUF_SIZE)
                cnt = EVENT_BUF_SIZE;

        i = 0;
        while (cnt && !isspace(ch)) {
                if (!i && ch == '!')
                        set = 0;
                else
                        buf[i++] = ch;

                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out_free;
                read++;
                cnt--;
        }
        buf[i] = 0;

        file->f_pos += read;

        ret = ftrace_set_clr_event(buf, set);
        if (ret)
                goto out_free;

        ret = read;

 out_free:
        kfree(buf);

        return ret;
}
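
/*
 * seq_file iterator for the available_events file: walk ftrace_events
 * and show only events that have a registration function (the ftrace
 * internal events are format-only and are skipped here).
 */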
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct list_head *list = m->private;
        struct ftrace_event_call *call;

        (*pos)++;

        for (;;) {
                if (list == &ftrace_events)
                        return NULL;

                call = list_entry(list, struct ftrace_event_call, list);

                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->regfunc)
                        break;

                list = list->next;
        }

        m->private = list->next;

        return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        return t_next(m, NULL, pos);
}
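
/*
 * seq_file iterator for the set_event file: same walk as above, but
 * only currently enabled events are reported.
 */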
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct list_head *list = m->private;
        struct ftrace_event_call *call;

        (*pos)++;

 retry:
        if (list == &ftrace_events)
                return NULL;

        call = list_entry(list, struct ftrace_event_call, list);

        if (!call->enabled) {
                list = list->next;
                goto retry;
        }

        m->private = list->next;

        return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = v;

        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
        int ret;
        const struct seq_operations *seq_ops;

        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_clear_events();

        seq_ops = inode->i_private;
        ret = seq_open(file, seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = ftrace_events.next;
        }
        return ret;
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;

        if (call->enabled)
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                mutex_lock(&event_mutex);
                ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return cnt;
}
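
/*
 * FIELD() emits the type name, field name, offset and size of one
 * member of struct trace_entry for the common header of every event
 * format file.  The sizeof() comparison leaves a reference to the
 * never-defined __bad_type_size() if the declared type ever stops
 * matching the real field, turning a mismatch into a build error.
 */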
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)                                               \
        sizeof(type) != sizeof(field.name) ? __bad_type_size() :       \
        #type, "common_" #name, offsetof(typeof(field), name),         \
        sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
        struct trace_entry field;

        /* struct trace_entry */
        return trace_seq_printf(s,
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                        "\n",
                        FIELD(unsigned short, type),
                        FIELD(unsigned char, flags),
                        FIELD(unsigned char, preempt_count),
                        FIELD(int, pid),
                        FIELD(int, tgid));
}

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        char *buf;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        /* If any of the first writes fail, so will the show_format. */
        trace_seq_printf(s, "name: %s\n", call->name);
        trace_seq_printf(s, "ID: %d\n", call->id);
        trace_seq_printf(s, "format:\n");
        trace_write_header(s);

        r = call->show_format(s);
        if (!r) {
                /*
                 * ug! The format output is bigger than a PAGE!!
                 */
                buf = "FORMAT TOO BIG\n";
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                            buf, strlen(buf));
                goto out;
        }
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
 out:
        kfree(s);
        return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", call->id);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        filter_print_preds(call, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char buf[64], *pbuf = buf;
        struct filter_pred *pred;
        int err;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = '\0';

        pred = kzalloc(sizeof(*pred), GFP_KERNEL);
        if (!pred)
                return -ENOMEM;

        err = filter_parse(&pbuf, pred);
        if (err < 0) {
                filter_free_pred(pred);
                return err;
        }

        if (pred->clear) {
                filter_disable_preds(call);
                filter_free_pred(pred);
                return cnt;
        }

        err = filter_add_pred(call, pred);
        if (err < 0) {
                filter_free_pred(pred);
                return err;
        }

        filter_free_pred(pred);

        *ppos += cnt;

        return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        filter_print_subsystem_preds(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        char buf[64], *pbuf = buf;
        struct filter_pred *pred;
        int err;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = '\0';

        pred = kzalloc(sizeof(*pred), GFP_KERNEL);
        if (!pred)
                return -ENOMEM;

        err = filter_parse(&pbuf, pred);
        if (err < 0) {
                filter_free_pred(pred);
                return err;
        }

        if (pred->clear) {
                filter_free_subsystem_preds(system);
                filter_free_pred(pred);
                return cnt;
        }

        err = filter_add_subsystem_pred(system, pred);
        if (err < 0) {
                filter_free_pred(pred);
                return err;
        }

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = tracing_open_generic,
        .read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
        .open = tracing_open_generic,
        .read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = tracing_open_generic,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
        static struct dentry *d_tracer;
        static struct dentry *d_events;

        if (d_events)
                return d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return NULL;

        d_events = debugfs_create_dir("events", d_tracer);
        if (!d_events)
                pr_warning("Could not create debugfs "
                           "'events' directory\n");

        return d_events;
}

static LIST_HEAD(event_subsystems);
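
/*
 * Find or create the debugfs directory for an event subsystem under
 * events/.  On any failure the parent events directory is returned,
 * so the event is still created, just not grouped under its
 * subsystem.
 */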
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0)
                        return system->entry;
        }

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system) {
                pr_warning("No memory to create event subsystem %s\n",
                           name);
                return d_events;
        }

        system->entry = debugfs_create_dir(name, d_events);
        if (!system->entry) {
                pr_warning("Could not create event subsystem %s\n",
                           name);
                kfree(system);
                return d_events;
        }

        system->name = kstrdup(name, GFP_KERNEL);
        if (!system->name) {
                debugfs_remove(system->entry);
                kfree(system);
                return d_events;
        }

        list_add(&system->list, &event_subsystems);

        system->filter = NULL;

        entry = debugfs_create_file("filter", 0644, system->entry, system,
                                    &ftrace_subsystem_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'%s/filter' entry\n", name);

        return system->entry;
}
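
/*
 * Create the per-event debugfs directory and its enable, id, filter
 * and format files.  The file_operations are passed in so module
 * events can use their own copies with .owner set (see
 * trace_create_file_ops() below).
 */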
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
        struct dentry *entry;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                d_events = event_subsystem_dir(call->system, d_events);

        if (call->raw_init) {
                ret = call->raw_init();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
        }

        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs "
                           "'%s' directory\n", call->name);
                return -1;
        }

        if (call->regfunc)
                entry = trace_create_file("enable", 0644, call->dir, call,
                                          enable);

        if (call->id)
                entry = trace_create_file("id", 0444, call->dir, call,
                                          id);

        if (call->define_fields) {
                ret = call->define_fields();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
                entry = trace_create_file("filter", 0644, call->dir, call,
                                          filter);
        }

        /* A trace may not want to export its format */
        if (!call->show_format)
                return 0;

        entry = trace_create_file("format", 0444, call->dir, call,
                                  format);

        return 0;
}

#define for_each_event(event, start, end)                       \
        for (event = start;                                     \
             (unsigned long)event < (unsigned long)end;         \
             event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
        struct list_head        list;
        struct module           *mod;
        struct file_operations  id;
        struct file_operations  enable;
        struct file_operations  format;
        struct file_operations  filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;

        /*
         * This is a bit of a PITA. To allow for correct reference
         * counting, modules must "own" their file_operations.
         * To do this, we allocate the file operations that will be
         * used in the event directory.
         */

        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
        if (!file_ops)
                return NULL;

        file_ops->mod = mod;

        file_ops->id = ftrace_event_id_fops;
        file_ops->id.owner = mod;

        file_ops->enable = ftrace_enable_fops;
        file_ops->enable.owner = mod;

        file_ops->filter = ftrace_event_filter_fops;
        file_ops->filter.owner = mod;

        file_ops->format = ftrace_event_format_fops;
        file_ops->format.owner = mod;

        list_add(&file_ops->list, &ftrace_module_file_list);

        return file_ops;
}
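
/*
 * A module is coming in: register every event it defines, using
 * file_operations owned by the module so the module reference count
 * covers open debugfs files.
 */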
static void trace_module_add_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call *call, *start, *end;
        struct dentry *d_events;

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        if (start == end)
                return;

        d_events = event_trace_events_dir();
        if (!d_events)
                return;

        for_each_event(call, start, end) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;

                /*
                 * This module has events, create file ops for this module
                 * if not already done.
                 */
                if (!file_ops) {
                        file_ops = trace_create_file_ops(mod);
                        if (!file_ops)
                                return;
                }
                call->mod = mod;
                list_add(&call->list, &ftrace_events);
                event_create_dir(call, d_events,
                                 &file_ops->id, &file_ops->enable,
                                 &file_ops->filter, &file_ops->format);
        }
}

static void trace_module_remove_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;

        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        if (call->enabled) {
                                call->enabled = 0;
                                call->unregfunc();
                        }
                        if (call->event)
                                unregister_ftrace_event(call->event);
                        debugfs_remove_recursive(call->dir);
                        list_del(&call->list);
                }
        }

        /* Now free the file_operations */
        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        break;
        }
        if (&file_ops->list != &ftrace_module_file_list) {
                list_del(&file_ops->list);
                kfree(file_ops);
        }
}

static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);

        return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
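
/*
 * Create the top level event debugfs files (available_events,
 * set_event, the ring buffer header descriptions) and a directory for
 * every built-in event, then register the module notifier so module
 * events are added and removed as modules load and unload.
 */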
static __init int event_trace_init(void)
{
        struct ftrace_event_call *call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
        int ret;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    (void *)&show_event_seq_ops,
                                    &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_events' entry\n");

        entry = debugfs_create_file("set_event", 0644, d_tracer,
                                    (void *)&show_set_event_seq_ops,
                                    &ftrace_set_event_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_event' entry\n");

        d_events = event_trace_events_dir();
        if (!d_events)
                return 0;

        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
                          &ftrace_show_header_fops);

        trace_create_file("header_event", 0444, d_events,
                          ring_buffer_print_entry_header,
                          &ftrace_show_header_fops);

        for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;
                list_add(&call->list, &ftrace_events);
                event_create_dir(call, d_events, &ftrace_event_id_fops,
                                 &ftrace_enable_fops, &ftrace_event_filter_fops,
                                 &ftrace_event_format_fops);
        }

        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");

        return 0;
}

fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
        spin_lock(&test_spinlock);
        spin_lock_irq(&test_spinlock_irq);
        udelay(1);
        spin_unlock_irq(&test_spinlock_irq);
        spin_unlock(&test_spinlock);

        mutex_lock(&test_mutex);
        msleep(1);
        mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
        void *test_malloc;

        test_malloc = kmalloc(1234, GFP_KERNEL);
        if (!test_malloc)
                pr_info("failed to kmalloc\n");

        schedule_on_each_cpu(test_work);

        kfree(test_malloc);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop())
                schedule();

        return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
        struct task_struct *test_thread;

        test_thread = kthread_run(event_test_thread, NULL, "test-events");
        msleep(1);
        kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
        struct ftrace_event_call *call;
        struct event_subsystem *system;
        char *sysname;
        int ret;

        pr_info("Running tests on trace events:\n");

        list_for_each_entry(call, &ftrace_events, list) {

                /* Only test those that have a regfunc */
                if (!call->regfunc)
                        continue;

                pr_info("Testing event %s: ", call->name);

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (call->enabled) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                call->enabled = 1;
                call->regfunc();

                event_test_stuff();

                call->unregfunc();
                call->enabled = 0;

                pr_cont("OK\n");
        }

        /* Now test at the sub system level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(system, &event_subsystems, list) {

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                /* ftrace_set_clr_event can modify the name passed in. */
                sysname = kstrdup(system->name, GFP_KERNEL);
                if (WARN_ON(!sysname)) {
                        pr_warning("Can't allocate memory, giving up!\n");
                        return;
                }
                ret = ftrace_set_clr_event(sysname, 1);
                kfree(sysname);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error enabling system %s\n",
                                   system->name);
                        continue;
                }

                event_test_stuff();

                sysname = kstrdup(system->name, GFP_KERNEL);
                if (WARN_ON(!sysname)) {
                        pr_warning("Can't allocate memory, giving up!\n");
                        return;
                }
                ret = ftrace_set_clr_event(sysname, 0);
                kfree(sysname);

                if (WARN_ON_ONCE(ret))
                        pr_warning("error disabling system %s\n",
                                   system->name);

                pr_cont("OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        sysname = kmalloc(4, GFP_KERNEL);
        if (WARN_ON(!sysname)) {
                pr_warning("Can't allocate memory, giving up!\n");
                return;
        }
        memcpy(sysname, "*:*", 4);

        ret = ftrace_set_clr_event(sysname, 1);
        if (WARN_ON_ONCE(ret)) {
                kfree(sysname);
                pr_warning("error enabling all events\n");
                return;
        }

        event_test_stuff();

        /* reset sysname */
        memcpy(sysname, "*:*", 4);
        ret = ftrace_set_clr_event(sysname, 0);
        kfree(sysname);

        if (WARN_ON_ONCE(ret)) {
                pr_warning("error disabling all events\n");
                return;
        }

        pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);
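
/*
 * Function tracer callback used to re-run the event self tests while
 * every kernel function call is being traced.  The per-CPU counter
 * prevents recursion if this callback itself ends up in traced
 * functions.
 */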
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int resched;
        int cpu;
        int pc;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->parent_ip = parent_ip;

        trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
        atomic_dec(&per_cpu(test_event_disable, cpu));
        ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
        .func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
        register_ftrace_function(&trace_ops);
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
        event_trace_self_tests();

        event_trace_self_test_with_function();

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif