trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

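/*
 * Record one field of an event on call->fields (type, name, offset, size
 * and signedness).  The list is later used by the event filtering code.
 * Returns 0 on success or -ENOMEM if any allocation fails.
 */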
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
	mutex_unlock(&event_mutex);
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}

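/*
 * Enable (set != 0) or disable (set == 0) every event that matches buf.
 * buf uses the same syntax as the set_event file, for example (assuming
 * the corresponding events are compiled in):
 *
 *	echo sched:sched_wakeup > set_event	(one event)
 *	echo 'irq:*' > set_event		(a whole subsystem)
 *	echo '*:*' > set_event			(everything)
 *
 * Returns 0 if at least one event matched, -EINVAL otherwise.
 */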
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

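/*
 * seq_file iterators: the t_* callbacks walk every registered event for
 * the "available_events" file, while the s_* callbacks only walk events
 * that are currently enabled, for reads of "set_event".  Both share
 * t_show() and t_stop().
 */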
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They cannot be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);

	if (*pos == 0)
		m->private = ftrace_events.next;

	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);

	if (*pos == 0)
		m->private = ftrace_events.next;

	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

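/*
 * Per-event "enable" file: reading returns "1\n" or "0\n" depending on
 * call->enabled; writing 0 or 1 unregisters or registers the event under
 * event_mutex.  Any other value is rejected with -EINVAL.
 */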
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}

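/*
 * Per-subsystem "enable" file.  Reading reports '1' if every event in the
 * subsystem is enabled, '0' if none are, and 'X' for a mixture; writing
 * 0 or 1 is forwarded to ftrace_set_clr_event() for the whole subsystem.
 */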
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = -1;
	int all = 0;
	int ret;

	if (system[0] == '*')
		all = 1;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (!all && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		if (call->enabled) {
			switch (set) {
			case -1:
				set = 1;
				break;
			case 0:
				set = 2;
				break;
			}
		} else {
			switch (set) {
			case -1:
				set = 0;
				break;
			case 1:
				set = 2;
				break;
			}
		}
		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 2)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[1] = '\n';
	switch (set) {
	case 0:
		buf[0] = '0';
		break;
	case 1:
		buf[0] = '1';
		break;
	case 2:
		buf[0] = 'X';
		break;
	default:
		buf[0] = '?';
	}

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char *command;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		break;

	default:
		return -EINVAL;
	}

	command = kstrdup(system, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	ret = ftrace_set_clr_event(command, val);
	if (ret)
		goto out_free;

	ret = cnt;

 out_free:
	kfree(command);

	*ppos += cnt;

	return ret;
}

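/*
 * Emit the common fields of struct trace_entry for an event's "format"
 * file.  The sizeof() check in FIELD() forces a link error through the
 * undefined __bad_type_size() if a declared type ever stops matching the
 * real field.  Each emitted line looks roughly like:
 *
 *	field:unsigned short type;	offset:0;	size:2;
 *
 * (the exact offsets and sizes depend on the architecture).
 */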
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

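/*
 * Create the debugfs directory for one event.  The result is
 * events/<system>/<event>/ containing "enable", "id", "filter" and
 * "format" files, each created only if the event provides the matching
 * callback (regfunc, id, define_fields, show_format).
 */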
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}

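/*
 * Iterate over an array of ftrace_event_call entries bounded by two
 * addresses, typically the linker-provided section start/stop symbols or
 * a module's trace_events array.
 */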
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

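/*
 * A module is being loaded: register every event it carries in its
 * trace_events section, using file_operations owned by the module so
 * that open event files pin the module in memory.
 */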
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

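/*
 * A module is being unloaded: disable and unregister its events, remove
 * their debugfs directories, free fields and filter predicates, drop the
 * module's private file_operations, and reset the ring buffer if the
 * module had registered any events.
 */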
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

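/*
 * Boot-time setup: create "available_events", "set_event" and the
 * "events" directory in debugfs, expose the ring buffer header formats,
 * add a directory for every built-in event found between the
 * __start/__stop_ftrace_events linker symbols, and register the module
 * notifier so modules can add and remove their own events.
 */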
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  "*:*", &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

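/*
 * Startup self tests: each event, then each subsystem, then all events
 * are enabled in turn while event_test_stuff() generates some activity
 * (locking, scheduling, allocation) that should fire typical trace points.
 */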
static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	char *sysname;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		/* ftrace_set_clr_event can modify the name passed in. */
		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 1);
		kfree(sysname);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 0);
		kfree(sysname);

		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	sysname = kmalloc(4, GFP_KERNEL);
	if (WARN_ON(!sysname)) {
		pr_warning("Can't allocate memory, giving up!\n");
		return;
	}
	memcpy(sysname, "*:*", 4);

	ret = ftrace_set_clr_event(sysname, 1);
	if (WARN_ON_ONCE(ret)) {
		kfree(sysname);
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 0);
	kfree(sysname);

	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

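/*
 * Function tracer callback used to rerun the self tests with function
 * tracing active.  It logs a TRACE_FN entry for every traced function,
 * using a per-cpu counter to avoid recursing into itself.
 */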
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	event_trace_self_tests();

	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif