trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field)
		kfree(field->name);
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

static int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}
int trace_event_raw_init(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(call->event);
	if (!id)
		return -ENODEV;
	call->id = id;
	INIT_LIST_HEAD(&call->fields);

	return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);
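
/*
 * Enable or disable a single event: enabling registers the event's
 * tracepoint callback via call->regfunc() and starts cmdline recording;
 * disabling unregisters it and stops cmdline recording.
 */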
static int ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	int ret = 0;

	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call);
		}
		break;
	case 1:
		if (!call->enabled) {
			tracing_start_cmdline_record();
			ret = call->regfunc(call);
			if (ret) {
				tracing_stop_cmdline_record();
				pr_info("event trace: Could not enable event "
					"%s\n", call->name);
				break;
			}
			call->enabled = 1;
		}
		break;
	}

	return ret;
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}
/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
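
/*
 * For example, writing "sched:sched_switch" to set_event (assuming the
 * scheduler events are built in) enables just that event, while "sched:"
 * or plain "sched" enables every event in the sched subsystem.  A leading
 * '!' is stripped by ftrace_event_write() and clears instead of sets.
 */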
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded((&parser))) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

 out_put:
	trace_parser_put(&parser);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}
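
/*
 * Iterator for the set_event file: only events that are currently
 * enabled are shown.
 */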
static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ret = ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return ret ? ret : cnt;
}
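
/*
 * The subsystem "enable" file reports '0' if every event in the system is
 * disabled, '1' if every event is enabled, 'X' for a mixture and '?' if
 * the system has no events at all.
 */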
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
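
/*
 * The "format" file describes the binary layout of an event record:
 * the event name and ID, the five common fields, a blank line, the
 * event-specific fields and finally the print format string.
 */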
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct ftrace_event_field *field;
	struct trace_seq *s;
	int common_field_count = 5;
	char *buf;
	int r = 0;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");

	list_for_each_entry_reverse(field, &call->fields, link) {
		/*
		 * Smartly shows the array type (except dynamic array).
		 * Normal:
		 *	field:TYPE VAR
		 * If TYPE := TYPE[LEN], it is shown:
		 *	field:TYPE VAR[LEN]
		 */
		const char *array_descriptor = strchr(field->type, '[');

		if (!strncmp(field->type, "__data_loc", 10))
			array_descriptor = NULL;

		if (!array_descriptor) {
			r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					field->type, field->name, field->offset,
					field->size, !!field->is_signed);
		} else {
			r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
					"\tsize:%u;\tsigned:%d;\n",
					(int)(array_descriptor - field->type),
					field->type, field->name,
					array_descriptor, field->offset,
					field->size, !!field->is_signed);
		}

		if (--common_field_count == 0)
			r = trace_seq_printf(s, "\n");

		if (!r)
			break;
	}

	if (r)
		r = trace_seq_printf(s, "\nprint fmt: %s\n",
				call->print_fmt);

	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}
static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};
static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);
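
/*
 * Look up or create the debugfs directory for an event subsystem.  The
 * directory gets its own "filter" and "enable" files and is reference
 * counted via nr_events so it can be torn down when its last event goes.
 */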
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we already created this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	trace_create_file("enable", 0644, system->entry,
			  (void *)system->name,
			  &ftrace_system_enable_fops);

	return system->entry;
}
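
/*
 * Create the per-event debugfs directory and populate it with the
 * "enable", "id", "filter" and "format" files (where applicable).
 */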
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		trace_create_file("enable", 0644, call->dir, call,
				  enable);

	if (call->id && call->profile_enable)
		trace_create_file("id", 0444, call->dir, call,
				  id);

	if (call->define_fields) {
		ret = trace_define_common_fields(call);
		if (!ret)
			ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		trace_create_file("filter", 0644, call->dir, call,
				  filter);
	}

	trace_create_file("format", 0444, call->dir, call,
			  format);

	return 0;
}
static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (!call->name)
		return -EINVAL;

	if (call->raw_init) {
		ret = call->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace "
					   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				&ftrace_enable_fops, &ftrace_event_filter_fops,
				&ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);
	return ret;
}
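
/*
 * Drop a reference on the subsystem directory and tear it down (filter,
 * name, debugfs entries) once its last event has been removed.
 */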
static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event)
		__unregister_ftrace_event(call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}
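
/*
 * Module notifier: add a module's events when it is loaded and remove
 * them again when it is unloaded.
 */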
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
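
/*
 * Events listed on the "trace_event=" boot parameter (comma separated)
 * are stashed here and enabled from event_trace_init().  Using the
 * parameter also sets ring_buffer_expanded and disables the startup
 * self tests.
 */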
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
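
/*
 * Called at fs_initcall time: create "available_events", "set_event" and
 * the "events" directory with the ring buffer header files, register every
 * built-in event, honor the trace_event= boot parameter and hook up the
 * module notifier.
 */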
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}
/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}
/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
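
/*
 * When the function tracer is available, run the self tests a second time
 * with function tracing active, using a minimal ftrace_ops callback that
 * records each traced function into the ring buffer.
 */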
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif
static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif