/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
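/*
 * Record one field of a trace event (name, C type, offset, size and
 * signedness) on the call's field list; the event filter code uses
 * this information to validate and apply filters.
 */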
int trace_define_field(struct ftrace_event_call *call, char *type,
		       char *name, int offset, int size, int is_signed)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
	}
	mutex_unlock(&event_mutex);
}
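/*
 * Flip a single event on or off.  Callers hold event_mutex;
 * regfunc()/unregfunc() attach or detach the event's tracepoint probe.
 */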
static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			call->unregfunc();
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			call->regfunc();
		}
		break;
	}
}
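/*
 * Parse a "<subsystem>:<event>" style string (see the comment below for
 * the accepted forms) and enable or disable every matching event.
 * Returns -EINVAL if nothing matched.
 */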
static int ftrace_set_clr_event(char *buf, int set)
{
	struct ftrace_event_call *call;
	char *event = NULL, *sub = NULL, *match;
	int ret = -EINVAL;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127
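/*
 * Handle writes to the "set_event" file: skip leading whitespace, copy
 * one whitespace-delimited token from user space (a leading '!' means
 * "disable") and hand it to ftrace_set_clr_event().
 */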
static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}
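/*
 * seq_file iterators for the "available_events" file (t_*) and the
 * "set_event" file (s_*).  Both walk ftrace_events under event_mutex;
 * t_next skips calls without a regfunc, s_next skips disabled events.
 */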
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&event_mutex);
	if (*pos == 0)
		m->private = ftrace_events.next;
	return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
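/*
 * The "enable" file of a subsystem directory reports '1' if every event
 * in the subsystem is enabled, '0' if every event is disabled, and 'X'
 * for a mixture of the two.
 */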
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = -1;
	int all = 0;
	int ret;

	if (system[0] == '*')
		all = 1;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (!all && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		if (call->enabled) {
			switch (set) {
			case -1:
				set = 1;
				break;
			case 0:
				set = 2;
				break;
			}
		} else {
			switch (set) {
			case -1:
				set = 0;
				break;
			case 1:
				set = 2;
				break;
			}
		}
		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 2)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[1] = '\n';
	switch (set) {
	case 0:
		buf[0] = '0';
		break;
	case 1:
		buf[0] = '1';
		break;
	case 2:
		buf[0] = 'X';
		break;
	default:
		buf[0] = '?';
	}

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char *command;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		break;

	default:
		return -EINVAL;
	}

	/* +3 for the ":*\0" */
	command = kmalloc(strlen(system)+3, GFP_KERNEL);
	if (!command)
		return -ENOMEM;

	sprintf(command, "%s:*", system);

	ret = ftrace_set_clr_event(command, val);
	if (ret)
		goto out_free;

	ret = cnt;

 out_free:
	kfree(command);

	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, tgid));
}
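/*
 * Emit the "format" file for one event: the common trace_entry header
 * fields followed by the event's own fields as produced by its
 * show_format() callback.  The output has to fit in a single trace_seq
 * buffer, otherwise "FORMAT TOO BIG" is reported instead.
 */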
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */
	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);
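/*
 * Find the debugfs directory for a subsystem, creating it (along with
 * its "filter" and "enable" files) on first use.  On failure the caller
 * falls back to the top-level events directory.
 */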
static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0)
			return system->entry;
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}
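/*
 * Create the per-event debugfs directory and populate it with the
 * "enable", "id", "filter" and "format" files, using the file_operations
 * passed in by the caller (built-in events and modules pass different
 * ones so that module reference counting works).
 */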
static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	if (call->raw_init) {
		ret = call->raw_init();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
	}

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields();
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}
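/*
 * Register the trace events compiled into a module being loaded: copy
 * the file_operations so the module owns them, add each event to
 * ftrace_events and create its debugfs directory.
 */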
static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}
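/*
 * Tear down every event that the departing module registered, free the
 * module's private file_operations, and reset the ring buffer if the
 * module had registered any events at all.
 */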
static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			if (call->enabled) {
				call->enabled = 0;
				call->unregfunc();
			}
			if (call->event)
				unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
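/*
 * Set up the events debugfs hierarchy at boot: the top-level
 * "available_events" and "set_event" files, the ring buffer header
 * files, a global "enable" file, one directory per built-in event,
 * and the module notifier that adds/removes events for modules.
 */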
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  "*", &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");
	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	char *sysname;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		call->enabled = 1;
		call->regfunc();

		event_test_stuff();

		call->unregfunc();
		call->enabled = 0;

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		/* ftrace_set_clr_event can modify the name passed in. */
		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 1);
		kfree(sysname);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		sysname = kstrdup(system->name, GFP_KERNEL);
		if (WARN_ON(!sysname)) {
			pr_warning("Can't allocate memory, giving up!\n");
			return;
		}
		ret = ftrace_set_clr_event(sysname, 0);
		kfree(sysname);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	sysname = kmalloc(4, GFP_KERNEL);
	if (WARN_ON(!sysname)) {
		pr_warning("Can't allocate memory, giving up!\n");
		return;
	}
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 1);
	if (WARN_ON_ONCE(ret)) {
		kfree(sysname);
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	memcpy(sysname, "*:*", 4);
	ret = ftrace_set_clr_event(sysname, 0);
	kfree(sysname);

	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);
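/*
 * Function-tracer callback used while re-running the self tests with the
 * function tracer enabled: it writes a TRACE_FN entry into the ring buffer
 * for every traced function, guarded by a per-CPU recursion counter.
 */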
static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	event_trace_self_tests();
	event_trace_self_test_with_function();

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif