trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based on work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
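/*
 * A sketch of a typical caller (the event and field are illustrative
 * only): an event's define_fields() callback registers each record
 * field so the filter code knows its type, offset and size, e.g.
 *
 *	ret = trace_define_field(call, "unsigned long", "ip",
 *				 offsetof(struct ftrace_entry, ip),
 *				 sizeof(unsigned long), 0, FILTER_OTHER);
 */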
#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);
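/*
 * For reference, __common_field(int, pid) above expands (roughly) to:
 *
 *	ret = trace_define_field(call, "int", "common_pid",
 *				 offsetof(struct trace_entry, pid),
 *				 sizeof(ent.pid), is_signed_type(int),
 *				 FILTER_OTHER);
 *	if (ret)
 *		return ret;
 */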
#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call->data);
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc(call->data);
		}
		break;
	}
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}
static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in the subsystem named
	 *  <name>, or any event whose name matches <name>.
	 */
	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
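/*
 * From user space this is driven by the set_event file (typically under
 * /sys/kernel/debug/tracing), for example:
 *
 *	echo 'sched:sched_switch' > set_event	(one event)
 *	echo 'sched:*' > set_event		(a whole subsystem)
 *	echo '!sched:sched_switch' > set_event	(clear an event)
 *
 * The leading '!' is stripped by ftrace_event_write() below before the
 * string reaches this function.
 */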
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events within the system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
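/*
 * A minimal sketch of an in-kernel caller (the names are just examples;
 * any registered system/event pair works):
 *
 *	trace_set_clr_event("sched", "sched_switch", 1);   enable one event
 *	trace_set_clr_event("sched", NULL, 0);             disable all of "sched"
 */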
/* 127 chars plus a terminating NUL -- 128 bytes should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read, ret;

	if (!cnt)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

out_put:
	trace_parser_put(&parser);

	return ret;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * Its events cannot be enabled or disabled via the
		 * event files, so skip events without a regfunc.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}
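/*
 * Note on the pattern above: t_start()/s_start() take event_mutex and
 * t_stop() releases it, so ftrace_events cannot change while a read of
 * available_events or set_event is walking it.  The list head itself is
 * handed to the *_next() helpers as a pseudo entry so iteration can
 * "continue" from position zero.
 */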
static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * or all cleared, or if we have a mixture.  Bit 0
		 * is set when a disabled event is seen, bit 1 when
		 * an enabled one is, so set indexes set_to_char:
		 * 0 = no events, 1 = all off, 2 = all on, 3 = mixed.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}
extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, lock_depth));
}
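/*
 * For reference, the common header this writes looks roughly like the
 * following (offsets assume the usual packing of struct trace_entry and
 * may differ by architecture):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 *	field:int common_lock_depth;	offset:8;	size:4;
 */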
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
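/*
 * Filter expressions are parsed by apply_event_filter(); from user space
 * they are written to the per-event filter file, for example (field
 * names are those of the event's format file):
 *
 *	echo 'common_pid == 42' > events/sched/sched_switch/filter
 *	echo 'prev_prio < 100 && next_pid != 0' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	(clears the filter)
 */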
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}
static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we already created this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id && call->profile_enable)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
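/*
 * The resulting debugfs layout (under the tracing mount point) looks
 * something like this, with one directory per subsystem and one per
 * event:
 *
 *	events/enable
 *	events/sched/enable
 *	events/sched/filter
 *	events/sched/sched_switch/enable
 *	events/sched/sched_switch/id
 *	events/sched/sched_switch/filter
 *	events/sched/sched_switch/format
 */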
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
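/*
 * Setting .owner to the module means opening one of these files takes a
 * reference on the module, so it cannot be unloaded while user space
 * still holds an event file open.
 */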
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			ftrace_event_enable_disable(call, 0);
			if (call->event)
				__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
			remove_subsystem_dir(call->system);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
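/*
 * For example, booting with:
 *
 *	trace_event=sched:sched_switch,irq
 *
 * enables the sched_switch event and the whole irq subsystem before
 * user space comes up; the comma-separated list is parsed in
 * event_trace_init() below.
 */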
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

/*
 * Testing syscall events here is pretty useless, but we still do it
 * if configured -- and it is quite time consuming.  What we really
 * need is a user thread to perform the syscalls as we test.
 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* now disable everything again */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}
#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}
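/*
 * Note on the per-cpu atomic above: the function tracer can call this
 * handler re-entrantly (e.g. from an interrupt that arrives while an
 * entry is being written).  The counter lets only the first, outermost
 * call on each CPU record an entry; nested calls see disabled != 1 and
 * bail out.
 */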
static struct ftrace_ops trace_ops __initdata  =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */