/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
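
/*
 * Example: a minimal sketch of how an event could register one of its
 * fields through trace_define_field().  The struct and event below
 * (my_entry, event_my) are hypothetical; real events have these calls
 * generated for them by the TRACE_EVENT() macro machinery rather than
 * written by hand.
 *
 *	struct my_entry {
 *		struct trace_entry	ent;
 *		unsigned long		addr;
 *	};
 *
 *	static int my_define_fields(void)
 *	{
 *		struct my_entry field;
 *
 *		return trace_define_field(&event_my, "unsigned long", "addr",
 *					  offsetof(typeof(field), addr),
 *					  sizeof(field.addr), 0);
 *	}
 */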
EXPORT_SYMBOL_GPL(trace_define_field);

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc();
		}
		break;
	}
}
static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */
	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
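
/*
 * Example writes that exercise the format above via the "set_event"
 * debugfs file (paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	# enable a single event
 *	echo sched:sched_switch >> /sys/kernel/debug/tracing/set_event
 *
 *	# enable every event in a subsystem
 *	echo 'irq:*' >> /sys/kernel/debug/tracing/set_event
 *
 *	# disable an event again (the '!' is handled by ftrace_event_write())
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 */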
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events within the system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
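
/*
 * Example: a sketch of how another part of the kernel could bracket a
 * region of interest with event recording, assuming it has a
 * declaration of trace_set_clr_event() in scope.  The event names are
 * illustrative only.
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 *	... code being analyzed ...
 *	trace_set_clr_event("sched", "sched_switch", 0);
 */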
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	/* cnt is a size_t, so it can never be negative */
	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * Its events cannot be enabled or disabled via the
		 * event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
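
/*
 * Note the truncate semantics this gives the "set_event" file: any
 * open for writing without O_APPEND first clears every event.  From a
 * shell (illustrative paths):
 *
 *	echo > /sys/kernel/debug/tracing/set_event    # clear all events
 *	echo sched:sched_wakeup > .../set_event       # clear all, then set one
 *	echo sched:sched_wakeup >> .../set_event      # add without clearing
 */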
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
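
/*
 * Example: the per-event "enable" file backed by the two handlers
 * above accepts "0" or "1" and reads back the current state
 * (illustrative path):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	1
 */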
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all the events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
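
/*
 * The two-bit "set" value computed above decodes through set_to_char[]
 * as:
 *
 *	set == 0: no matching events at all	-> '?'
 *	set == 1: all matching events disabled	-> '0'
 *	set == 2: all matching events enabled	-> '1'
 *	set == 3: a mixture			-> 'X'
 */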
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
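
/*
 * On a typical build this emits the common header that begins every
 * event's "format" file, along the lines of the sample below (the
 * exact offsets and sizes follow the layout of struct trace_entry):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 *	field:int common_tgid;	offset:8;	size:4;
 */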
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
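
/*
 * Example filter expressions accepted through this file.  Field names
 * come from the event's "format" file; the paths and events here are
 * illustrative:
 *
 *	echo 'common_pid == 0' > events/sched/sched_switch/filter
 *	echo 'prev_comm == "bash" && next_prio < 100' \
 *		> events/sched/sched_switch/filter
 *
 *	# writing "0" clears the filter
 *	echo 0 > events/sched/sched_switch/filter
 */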
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we already created this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
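
/*
 * The net effect is one directory per event under "events/".  For a
 * hypothetical event "foo" in subsystem "sys" the tree looks like:
 *
 *	events/sys/filter	(created by event_subsystem_dir())
 *	events/sys/enable
 *	events/sys/foo/enable	(only if the event has a regfunc)
 *	events/sys/foo/id	(only if the event has an id)
 *	events/sys/foo/filter	(only if the event defines fields)
 *	events/sys/foo/format	(only if the event exports a format)
 */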
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			ftrace_event_enable_disable(call, 0);
			if (call->event)
				__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops,
				 &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
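
/*
 * After event_trace_init() runs, the tracing debugfs directory holds,
 * in addition to the per-event tree built above:
 *
 *	available_events	- lists every registered event
 *	set_event		- read/write the set of enabled events
 *	events/enable		- enable or disable all events at once
 *	events/header_page	- ring buffer page header format
 *	events/header_event	- ring buffer event header format
 */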
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * then by groups, and finally all trace points together.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* disable everything again */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata = {
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	event_trace_self_tests();
	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */