trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct ftrace_event_field *field;

        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;

        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;

        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, &call->fields);

        return 0;

err:
        if (field) {
                kfree(field->name);
                kfree(field->type);
        }
        kfree(field);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
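
/*
 * Example (illustrative only, not part of the original file): an event's
 * define_fields() callback typically describes each member of its entry
 * structure with trace_define_field().  Assuming a hypothetical entry type
 * with an "unsigned long ip" member, a call would look roughly like:
 *
 *      ret = trace_define_field(call, "unsigned long", "ip",
 *                               offsetof(struct my_entry, ip),
 *                               sizeof(unsigned long),
 *                               is_signed_type(unsigned long),
 *                               FILTER_OTHER);
 *
 * struct my_entry is a made-up name for this sketch; the TRACE_EVENT()
 * macros normally generate such calls automatically.
 */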

#define __common_field(type, item)                                     \
        ret = trace_define_field(call, #type, "common_" #item,         \
                                 offsetof(typeof(ent), item),          \
                                 sizeof(ent.item),                     \
                                 is_signed_type(type), FILTER_OTHER);  \
        if (ret)                                                       \
                return ret;

int trace_define_common_fields(struct ftrace_event_call *call)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);
        __common_field(int, lock_depth);

        return ret;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);

void trace_destroy_fields(struct ftrace_event_call *call)
{
        struct ftrace_event_field *field, *next;

        list_for_each_entry_safe(field, next, &call->fields, link) {
                list_del(&field->link);
                kfree(field->type);
                kfree(field->name);
                kfree(field);
        }
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
                                        int enable)
{
        switch (enable) {
        case 0:
                if (call->enabled) {
                        call->enabled = 0;
                        tracing_stop_cmdline_record();
                        call->unregfunc(call);
                }
                break;
        case 1:
                if (!call->enabled) {
                        call->enabled = 1;
                        tracing_start_cmdline_record();
                        call->regfunc(call);
                }
                break;
        }
}

static void ftrace_clear_events(void)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                ftrace_event_enable_disable(call, 0);
        }
        mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
                                  const char *event, int set)
{
        struct ftrace_event_call *call;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {

                if (!call->name || !call->regfunc)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(call, set);

                ret = 0;
        }
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        return __ftrace_set_clr_event(NULL, system, event, set);
}
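
/*
 * Example (illustrative only): strings accepted by ftrace_set_clr_event()
 * via the set_event file include forms such as
 *
 *      sched:sched_switch      - one event in the sched subsystem
 *      sched:                  - every event in the sched subsystem
 *      sched_switch            - any subsystem or event with that name
 *      !sched:sched_switch     - the '!' prefix (stripped by
 *                                ftrace_event_write() below) disables
 *
 * Kernel code can do the same thing directly; assuming the sched_switch
 * trace point is built in, something like
 *
 *      trace_set_clr_event("sched", "sched_switch", 1);
 *
 * would enable it, and passing 0 would disable it again.
 */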

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded(&parser)) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                parser.buffer[parser.idx] = 0;

                ret = ftrace_set_clr_event(parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

out_put:
        trace_parser_put(&parser);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = v;

        (*pos)++;

        list_for_each_entry_continue(call, &ftrace_events, list) {
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They cannot be enabled or disabled via the event files.
                 */
                if (call->regfunc)
                        return call;
        }

        return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_call *call;
        loff_t l;

        mutex_lock(&event_mutex);

        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
        for (l = 0; l <= *pos; ) {
                call = t_next(m, call, &l);
                if (!call)
                        break;
        }

        return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_event_call *call = v;

        (*pos)++;

        list_for_each_entry_continue(call, &ftrace_events, list) {
                if (call->enabled)
                        return call;
        }

        return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_event_call *call;
        loff_t l;

        mutex_lock(&event_mutex);

        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
        for (l = 0; l <= *pos; ) {
                call = s_next(m, call, &l);
                if (!call)
                        break;
        }

        return call;
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = v;

        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops;

        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
                ftrace_clear_events();

        seq_ops = inode->i_private;
        return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;

        if (call->enabled)
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                mutex_lock(&event_mutex);
                ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        const char *system = filp->private_data;
        struct ftrace_event_call *call;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!call->name || !call->regfunc)
                        continue;

                if (system && strcmp(call->system, system) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set,
                 * or if all events are cleared, or if we have a
                 * mixture: bit 0 is set when at least one event is
                 * disabled, bit 1 when at least one is enabled.
                 * set_to_char[] then maps the result to '0' (all
                 * disabled), '1' (all enabled) or 'X' (a mixture).
                 */
                set |= (1 << !!call->enabled);

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        const char *system = filp->private_data;
        unsigned long val;
        char buf[64];
        ssize_t ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        ret = __ftrace_set_clr_event(NULL, system, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)                                               \
        sizeof(type) != sizeof(field.name) ? __bad_type_size() :       \
        #type, "common_" #name, offsetof(typeof(field), name),         \
                sizeof(field.name), is_signed_type(type)
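
/*
 * Note on the FIELD() macro above (explanatory comment, added for clarity):
 * the conditional expression is a build-time size check.  When the declared
 * type matches the size of the struct member, the call to the never-defined
 * __bad_type_size() is dead code and is eliminated; when the sizes differ,
 * the reference survives and the build fails at link time.  For instance,
 * FIELD(unsigned short, type) only links if
 * sizeof(unsigned short) == sizeof(field.type).
 */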

static int trace_write_header(struct trace_seq *s)
{
        struct trace_entry field;

        /* struct trace_entry */
        return trace_seq_printf(s,
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
                        "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
                        "\n",
                        FIELD(unsigned short, type),
                        FIELD(unsigned char, flags),
                        FIELD(unsigned char, preempt_count),
                        FIELD(int, pid),
                        FIELD(int, lock_depth));
}

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        char *buf;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        /* If any of the first writes fail, so will the show_format. */

        trace_seq_printf(s, "name: %s\n", call->name);
        trace_seq_printf(s, "ID: %d\n", call->id);
        trace_seq_printf(s, "format:\n");
        trace_write_header(s);

        r = call->show_format(call, s);
        if (!r) {
                /*
                 * ug! The format output is bigger than a PAGE!!
                 */
                buf = "FORMAT TOO BIG\n";
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                            buf, strlen(buf));
                goto out;
        }

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
out:
        kfree(s);
        return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", call->id);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
        kfree(s);
        return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_event_filter(call, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_event_filter(call, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_subsystem_event_filter(system, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = tracing_open_generic,
        .read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
        .open = tracing_open_generic,
        .read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = tracing_open_generic,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
        .open = tracing_open_generic,
        .read = system_enable_read,
        .write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
        static struct dentry *d_tracer;
        static struct dentry *d_events;

        if (d_events)
                return d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return NULL;

        d_events = debugfs_create_dir("events", d_tracer);
        if (!d_events)
                pr_warning("Could not create debugfs "
                           "'events' directory\n");

        return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we already created this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
                        system->nr_events++;
                        return system->entry;
                }
        }

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system) {
                pr_warning("No memory to create event subsystem %s\n",
                           name);
                return d_events;
        }

        system->entry = debugfs_create_dir(name, d_events);
        if (!system->entry) {
                pr_warning("Could not create event subsystem %s\n",
                           name);
                kfree(system);
                return d_events;
        }

        system->nr_events = 1;
        system->name = kstrdup(name, GFP_KERNEL);
        if (!system->name) {
                debugfs_remove(system->entry);
                kfree(system);
                return d_events;
        }

        list_add(&system->list, &event_subsystems);

        system->filter = NULL;

        system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
        if (!system->filter) {
                pr_warning("Could not allocate filter for subsystem "
                           "'%s'\n", name);
                return system->entry;
        }

        entry = debugfs_create_file("filter", 0644, system->entry, system,
                                    &ftrace_subsystem_filter_fops);
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
                pr_warning("Could not create debugfs "
                           "'%s/filter' entry\n", name);
        }

        trace_create_file("enable", 0644, system->entry,
                          (void *)system->name,
                          &ftrace_system_enable_fops);

        return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                d_events = event_subsystem_dir(call->system, d_events);

        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs "
                           "'%s' directory\n", call->name);
                return -1;
        }

        if (call->regfunc)
                trace_create_file("enable", 0644, call->dir, call,
                                  enable);

        if (call->id && call->profile_enable)
                trace_create_file("id", 0444, call->dir, call,
                                  id);

        if (call->define_fields) {
                ret = call->define_fields(call);
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
                trace_create_file("filter", 0644, call->dir, call,
                                  filter);
        }

        /* A trace may not want to export its format */
        if (!call->show_format)
                return 0;

        trace_create_file("format", 0444, call->dir, call,
                          format);

        return 0;
}
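
/*
 * For reference (added comment): a successful event_create_dir() produces
 * a per-event debugfs directory roughly like
 *
 *      <debugfs>/tracing/events/<system>/<event>/
 *              enable  - only when the event has a regfunc
 *              id      - only when the event has an id and profile_enable
 *              filter  - only when define_fields() succeeds
 *              format  - only when the event provides show_format
 *
 * with the <system> level omitted for events that did not define
 * TRACE_SYSTEM.
 */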

static int __trace_add_event_call(struct ftrace_event_call *call)
{
        struct dentry *d_events;
        int ret;

        if (!call->name)
                return -EINVAL;

        if (call->raw_init) {
                ret = call->raw_init(call);
                if (ret < 0) {
                        if (ret != -ENOSYS)
                                pr_warning("Could not initialize trace "
                                           "events/%s\n", call->name);
                        return ret;
                }
        }

        d_events = event_trace_events_dir();
        if (!d_events)
                return -ENOENT;

        ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
                               &ftrace_enable_fops, &ftrace_event_filter_fops,
                               &ftrace_event_format_fops);
        if (!ret)
                list_add(&call->list, &ftrace_events);

        return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
        int ret;

        mutex_lock(&event_mutex);
        ret = __trace_add_event_call(call);
        mutex_unlock(&event_mutex);
        return ret;
}
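
/*
 * Example (illustrative only): trace_add_event_call() is the entry point
 * for registering an event at run time instead of via the built-in event
 * section.  A hypothetical caller that has filled in its own
 * struct ftrace_event_call (name, system, regfunc, and so on) would do:
 *
 *      err = trace_add_event_call(&my_event_call);
 *      ...
 *      trace_remove_event_call(&my_event_call);
 *
 * where my_event_call is a made-up name used only for this sketch.
 */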

static void remove_subsystem_dir(const char *name)
{
        struct event_subsystem *system;

        if (strcmp(name, TRACE_SYSTEM) == 0)
                return;

        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
                        if (!--system->nr_events) {
                                struct event_filter *filter = system->filter;

                                debugfs_remove_recursive(system->entry);
                                list_del(&system->list);
                                if (filter) {
                                        kfree(filter->filter_string);
                                        kfree(filter);
                                }
                                kfree(system->name);
                                kfree(system);
                        }
                        break;
                }
        }
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
        ftrace_event_enable_disable(call, 0);
        if (call->event)
                __unregister_ftrace_event(call->event);
        debugfs_remove_recursive(call->dir);
        list_del(&call->list);
        trace_destroy_fields(call);
        destroy_preds(call);
        remove_subsystem_dir(call->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
        mutex_lock(&event_mutex);
        down_write(&trace_event_mutex);
        __trace_remove_event_call(call);
        up_write(&trace_event_mutex);
        mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)                       \
        for (event = start;                                     \
             (unsigned long)event < (unsigned long)end;         \
             event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
        struct list_head                list;
        struct module                   *mod;
        struct file_operations          id;
        struct file_operations          enable;
        struct file_operations          format;
        struct file_operations          filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;

        /*
         * This is a bit of a PITA. To allow for correct reference
         * counting, modules must "own" their file_operations.
         * To do this, we allocate the file operations that will be
         * used in the event directory.
         */

        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
        if (!file_ops)
                return NULL;

        file_ops->mod = mod;

        file_ops->id = ftrace_event_id_fops;
        file_ops->id.owner = mod;

        file_ops->enable = ftrace_enable_fops;
        file_ops->enable.owner = mod;

        file_ops->filter = ftrace_event_filter_fops;
        file_ops->filter.owner = mod;

        file_ops->format = ftrace_event_format_fops;
        file_ops->format.owner = mod;

        list_add(&file_ops->list, &ftrace_module_file_list);

        return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call *call, *start, *end;
        struct dentry *d_events;
        int ret;

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        if (start == end)
                return;

        d_events = event_trace_events_dir();
        if (!d_events)
                return;

        for_each_event(call, start, end) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;
                if (call->raw_init) {
                        ret = call->raw_init(call);
                        if (ret < 0) {
                                if (ret != -ENOSYS)
                                        pr_warning("Could not initialize trace "
                                                   "point events/%s\n", call->name);
                                continue;
                        }
                }
                /*
                 * This module has events, create file ops for this module
                 * if not already done.
                 */
                if (!file_ops) {
                        file_ops = trace_create_file_ops(mod);
                        if (!file_ops)
                                return;
                }
                call->mod = mod;
                ret = event_create_dir(call, d_events,
                                       &file_ops->id, &file_ops->enable,
                                       &file_ops->filter, &file_ops->format);
                if (!ret)
                        list_add(&call->list, &ftrace_events);
        }
}

static void trace_module_remove_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;
        bool found = false;

        down_write(&trace_event_mutex);
        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        found = true;
                        __trace_remove_event_call(call);
                }
        }

        /* Now free the file_operations */
        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        break;
        }
        if (&file_ops->list != &ftrace_module_file_list) {
                list_del(&file_ops->list);
                kfree(file_ops);
        }

        /*
         * It is safest to reset the ring buffer if the module being unloaded
         * registered any events.
         */
        if (found)
                tracing_reset_current_online_cpus();
        up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);

        return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
        strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
        ring_buffer_expanded = 1;
        tracing_selftest_disabled = 1;

        return 1;
}
__setup("trace_event=", setup_trace_event);
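
/*
 * Example (illustrative only): events listed in the "trace_event=" boot
 * parameter are enabled by event_trace_init() below, which splits the
 * saved string on commas and feeds each token to ftrace_set_clr_event().
 * For instance, booting with
 *
 *      trace_event=sched:sched_switch,irq
 *
 * would enable the sched_switch event and every event in the irq
 * subsystem (assuming both are built in).
 */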

static __init int event_trace_init(void)
{
        struct ftrace_event_call *call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
        int ret;
        char *buf = bootup_event_buf;
        char *token;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    (void *)&show_event_seq_ops,
                                    &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_events' entry\n");

        entry = debugfs_create_file("set_event", 0644, d_tracer,
                                    (void *)&show_set_event_seq_ops,
                                    &ftrace_set_event_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_event' entry\n");

        d_events = event_trace_events_dir();
        if (!d_events)
                return 0;

        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
                          &ftrace_show_header_fops);

        trace_create_file("header_event", 0444, d_events,
                          ring_buffer_print_entry_header,
                          &ftrace_show_header_fops);

        trace_create_file("enable", 0644, d_events,
                          NULL, &ftrace_system_enable_fops);

        for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;
                if (call->raw_init) {
                        ret = call->raw_init(call);
                        if (ret < 0) {
                                if (ret != -ENOSYS)
                                        pr_warning("Could not initialize trace "
                                                   "point events/%s\n", call->name);
                                continue;
                        }
                }
                ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
                                       &ftrace_enable_fops,
                                       &ftrace_event_filter_fops,
                                       &ftrace_event_format_fops);
                if (!ret)
                        list_add(&call->list, &ftrace_events);
        }

        while (true) {
                token = strsep(&buf, ",");

                if (!token)
                        break;
                if (!*token)
                        continue;

                ret = ftrace_set_clr_event(token, 1);
                if (ret)
                        pr_warning("Failed to enable trace event: %s\n", token);
        }

        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");

        return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
        spin_lock(&test_spinlock);
        spin_lock_irq(&test_spinlock_irq);
        udelay(1);
        spin_unlock_irq(&test_spinlock_irq);
        spin_unlock(&test_spinlock);

        mutex_lock(&test_mutex);
        msleep(1);
        mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
        void *test_malloc;

        test_malloc = kmalloc(1234, GFP_KERNEL);
        if (!test_malloc)
                pr_info("failed to kmalloc\n");

        schedule_on_each_cpu(test_work);

        kfree(test_malloc);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop())
                schedule();

        return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
        struct task_struct *test_thread;

        test_thread = kthread_run(event_test_thread, NULL, "test-events");
        msleep(1);
        kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
        struct ftrace_event_call *call;
        struct event_subsystem *system;
        int ret;

        pr_info("Running tests on trace events:\n");

        list_for_each_entry(call, &ftrace_events, list) {

                /* Only test those that have a regfunc */
                if (!call->regfunc)
                        continue;

/*
 * Testing syscall events here is pretty useless, but
 * we still do it if configured. But this is time consuming.
 * What we really need is a user thread to perform the
 * syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
                if (call->system &&
                    strcmp(call->system, "syscalls") == 0)
                        continue;
#endif

                pr_info("Testing event %s: ", call->name);

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (call->enabled) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                ftrace_event_enable_disable(call, 1);
                event_test_stuff();
                ftrace_event_enable_disable(call, 0);

                pr_cont("OK\n");
        }

        /* Now test at the sub system level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(system, &event_subsystems, list) {

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error enabling system %s\n",
                                   system->name);
                        continue;
                }

                event_test_stuff();

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret))
                        pr_warning("error disabling system %s\n",
                                   system->name);

                pr_cont("OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error enabling all events\n");
                return;
        }

        event_test_stuff();

        /* reset sysname */
        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error disabling all events\n");
                return;
        }

        pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int resched;
        int cpu;
        int pc;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(&buffer,
                                                  TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->parent_ip = parent_ip;

        trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

out:
        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
        ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
        .func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
        register_ftrace_function(&trace_ops);
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
        if (!tracing_selftest_disabled) {
                event_trace_self_tests();
                event_trace_self_test_with_function();
        }

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif