trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
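
/*
 * Illustrative only: an event's define_fields callback registers each
 * field of its entry with trace_define_field().  The entry struct here
 * is hypothetical; real callers are generated by the event macros:
 *
 *	ret = trace_define_field(call, "pid_t", "pid",
 *				 offsetof(struct my_entry, pid),
 *				 sizeof(pid_t), 1);
 */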

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc();
		}
		break;
	}
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);

	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}

	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
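
/*
 * Assuming debugfs is mounted at /sys/kernel/debug, the parsing above
 * corresponds to writes such as:
 *
 *	echo sched:sched_switch > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:*' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' (handled in ftrace_event_write() below) clears the
 * matching events instead of setting them.
 */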

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
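
/*
 * Illustrative only ("sched" and "sched_switch" stand in for any
 * registered system/event pair):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);
 */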

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	/* cnt is a size_t and can never be negative */
	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
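
/*
 * seq_file iterators: t_start/t_next walk every registered event that
 * has a regfunc (backing "available_events"), while s_start/s_next walk
 * only the enabled events (backing the read side of "set_event").
 * t_show and t_stop are shared by both.
 */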
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = t_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = s_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
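
/*
 * Opening "set_event" for writing without O_APPEND first clears every
 * event, which is what makes "echo sched:sched_switch > set_event"
 * replace the current set rather than add to it.
 */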
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
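
/*
 * The per-event "enable" file accepts only "0" and "1", e.g.:
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *
 * The per-subsystem "enable" file handled below behaves the same way
 * but toggles every event in that subsystem at once.
 */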
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all the events are cleared, or if we have a
		 * mixture: bit 0 is set if any event is disabled,
		 * bit 1 if any event is enabled.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

 out:
	*ppos += cnt;

	return ret;
}
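
/*
 * __bad_type_size() is never defined anywhere.  If FIELD() is used with
 * a type whose size does not match the struct member, the reference to
 * __bad_type_size() survives to link time and the build fails: it is a
 * compile/link-time size check.
 */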
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
	sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
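
/*
 * On a typical configuration the common header written above begins:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	...
 *
 * The exact offsets depend on the layout of struct trace_entry.
 */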
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}
	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
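
/*
 * Filters are boolean expressions over the event's fields (the field
 * names for a given event are listed in its "format" file), e.g.:
 *
 *	echo 'common_pid == 0' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	# clear the filter
 */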
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
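
/*
 * The directory tree built below normally appears under
 * /sys/kernel/debug/tracing (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	events/
 *		enable
 *		<subsystem>/
 *			enable
 *			filter
 *			<event>/
 *				enable
 *				id
 *				filter
 *				format
 */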
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			ftrace_event_enable_disable(call, 0);
			if (call->event)
				__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * then by groups, and finally all trace points together.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset all events */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	event_trace_self_tests();
	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */