trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, char *type,
                       char *name, int offset, int size, int is_signed)
{
        struct ftrace_event_field *field;

        field = kzalloc(sizeof(*field), GFP_KERNEL);
        if (!field)
                goto err;

        field->name = kstrdup(name, GFP_KERNEL);
        if (!field->name)
                goto err;

        field->type = kstrdup(type, GFP_KERNEL);
        if (!field->type)
                goto err;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;
        list_add(&field->link, &call->fields);

        return 0;

err:
        if (field) {
                kfree(field->name);
                kfree(field->type);
        }
        kfree(field);

        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

  49. #ifdef CONFIG_MODULES
  50. static void trace_destroy_fields(struct ftrace_event_call *call)
  51. {
  52. struct ftrace_event_field *field, *next;
  53. list_for_each_entry_safe(field, next, &call->fields, link) {
  54. list_del(&field->link);
  55. kfree(field->type);
  56. kfree(field->name);
  57. kfree(field);
  58. }
  59. }
  60. #endif /* CONFIG_MODULES */
static void ftrace_clear_events(void)
{
        struct ftrace_event_call *call;

        mutex_lock(&event_mutex);

        list_for_each_entry(call, &ftrace_events, list) {
                if (call->enabled) {
                        call->enabled = 0;
                        call->unregfunc();
                }
        }

        mutex_unlock(&event_mutex);
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
                                        int enable)
{
        switch (enable) {
        case 0:
                if (call->enabled) {
                        call->enabled = 0;
                        call->unregfunc();
                }
                break;
        case 1:
                if (!call->enabled) {
                        call->enabled = 1;
                        call->regfunc();
                }
                break;
        }
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
                                  const char *event, int set)
{
        struct ftrace_event_call *call;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {

                if (!call->name || !call->regfunc)
                        continue;

                if (match &&
                    strcmp(match, call->name) != 0 &&
                    strcmp(match, call->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->system) != 0)
                        continue;

                if (event && strcmp(event, call->name) != 0)
                        continue;

                ftrace_event_enable_disable(call, set);

                ret = 0;
        }
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        return __ftrace_set_clr_event(match, sub, event, set);
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        return __ftrace_set_clr_event(NULL, system, event, set);
}

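/*
 * Illustrative usage (a sketch, not part of the original file): another
 * kernel subsystem could toggle a single event from C code, assuming an
 * event named "sched_switch" exists in the "sched" system:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);    enable
 *	ret = trace_set_clr_event("sched", "sched_switch", 0);    disable
 */
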
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        size_t read = 0;
        int i, set = 1;
        ssize_t ret;
        char *buf;
        char ch;

        if (!cnt || cnt < 0)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        ret = get_user(ch, ubuf++);
        if (ret)
                return ret;
        read++;
        cnt--;

        /* skip white space */
        while (cnt && isspace(ch)) {
                ret = get_user(ch, ubuf++);
                if (ret)
                        return ret;
                read++;
                cnt--;
        }

        /* Only white space found? */
        if (isspace(ch)) {
                file->f_pos += read;
                ret = read;
                return ret;
        }

        buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (cnt > EVENT_BUF_SIZE)
                cnt = EVENT_BUF_SIZE;

        i = 0;
        while (cnt && !isspace(ch)) {
                if (!i && ch == '!')
                        set = 0;
                else
                        buf[i++] = ch;

                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out_free;
                read++;
                cnt--;
        }
        buf[i] = 0;

        file->f_pos += read;

        ret = ftrace_set_clr_event(buf, set);
        if (ret)
                goto out_free;

        ret = read;

 out_free:
        kfree(buf);

        return ret;
}

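/*
 * Example interaction from user space (a sketch; paths assume debugfs is
 * mounted at /sys/kernel/debug and that a "sched:sched_switch" event
 * exists):
 *
 *	echo sched:sched_switch    > /sys/kernel/debug/tracing/set_event
 *	echo '!sched:sched_switch' >> /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' turns the named event(s) off, as handled by the '!' check
 * in the parsing loop above.
 */
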
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct list_head *list = m->private;
        struct ftrace_event_call *call;

        (*pos)++;

        for (;;) {
                if (list == &ftrace_events)
                        return NULL;

                call = list_entry(list, struct ftrace_event_call, list);

                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->regfunc)
                        break;

                list = list->next;
        }

        m->private = list->next;

        return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&event_mutex);
        if (*pos == 0)
                m->private = ftrace_events.next;
        return t_next(m, NULL, pos);
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct list_head *list = m->private;
        struct ftrace_event_call *call;

        (*pos)++;

 retry:
        if (list == &ftrace_events)
                return NULL;

        call = list_entry(list, struct ftrace_event_call, list);

        if (!call->enabled) {
                list = list->next;
                goto retry;
        }

        m->private = list->next;

        return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&event_mutex);
        if (*pos == 0)
                m->private = ftrace_events.next;
        return s_next(m, NULL, pos);
}

static int t_show(struct seq_file *m, void *v)
{
        struct ftrace_event_call *call = v;

        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->system);
        seq_printf(m, "%s\n", call->name);

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
        const struct seq_operations *seq_ops;

        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_clear_events();

        seq_ops = inode->i_private;
        return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;

        if (call->enabled)
                buf = "1\n";
        else
                buf = "0\n";

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char buf[64];
        unsigned long val;
        int ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                mutex_lock(&event_mutex);
                ftrace_event_enable_disable(call, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        const char *system = filp->private_data;
        struct ftrace_event_call *call;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(call, &ftrace_events, list) {
                if (!call->name || !call->regfunc)
                        continue;

                if (system && strcmp(call->system, system) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set
                 * or if all events are cleared, or if we have
                 * a mixture.
                 */
                set |= (1 << !!call->enabled);

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

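/*
 * Reading a subsystem's "enable" file therefore yields a single character:
 * '0' if every matching event is disabled, '1' if every matching event is
 * enabled, 'X' for a mixture, and '?' if no event matched at all (set
 * stays 0). This follows from the set |= (1 << !!call->enabled) logic above.
 */
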
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        const char *system = filp->private_data;
        unsigned long val;
        char buf[64];
        ssize_t ret;

        if (cnt >= sizeof(buf))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, cnt))
                return -EFAULT;

        buf[cnt] = 0;

        ret = strict_strtoul(buf, 10, &val);
        if (ret < 0)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        ret = __ftrace_set_clr_event(NULL, system, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

 out:
        *ppos += cnt;

        return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
        struct trace_entry field;

        /* struct trace_entry */
        return trace_seq_printf(s,
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
                                "\n",
                                FIELD(unsigned short, type),
                                FIELD(unsigned char, flags),
                                FIELD(unsigned char, preempt_count),
                                FIELD(int, pid),
                                FIELD(int, tgid));
}

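/*
 * For reference, the common header emitted above appears at the top of
 * every event "format" file roughly as follows. The offsets and sizes are
 * computed at build time from struct trace_entry; the numbers below are
 * only what a typical layout with 4-byte ints would produce:
 *
 *	field:unsigned short type;          offset:0;  size:2;
 *	field:unsigned char flags;          offset:2;  size:1;
 *	field:unsigned char preempt_count;  offset:3;  size:1;
 *	field:int pid;                      offset:4;  size:4;
 *	field:int tgid;                     offset:8;  size:4;
 */
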
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        char *buf;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        /* If any of the first writes fail, so will the show_format. */

        trace_seq_printf(s, "name: %s\n", call->name);
        trace_seq_printf(s, "ID: %d\n", call->id);
        trace_seq_printf(s, "format:\n");
        trace_write_header(s);

        r = call->show_format(s);
        if (!r) {
                /*
                 * ug! The format output is bigger than a PAGE!!
                 */
                buf = "FORMAT TOO BIG\n";
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                            buf, strlen(buf));
                goto out;
        }

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
 out:
        kfree(s);
        return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);
        trace_seq_printf(s, "%d\n", call->id);

        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, s->len);
        kfree(s);
        return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_event_filter(call, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct ftrace_event_call *call = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_event_filter(call, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

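/*
 * Illustrative usage (a sketch): a filter expression is installed by
 * writing it to an event's "filter" file. The expression syntax itself is
 * parsed by apply_event_filter() in the filter code, not here; the
 * "common_" fields written by trace_write_header() are among the operands
 * it can reference. For example, with <system> and <event> as placeholders:
 *
 *	echo 'common_pid != 0' > events/<system>/<event>/filter
 */
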
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct event_subsystem *system = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = (char *)__get_free_page(GFP_TEMPORARY);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, ubuf, cnt)) {
                free_page((unsigned long) buf);
                return -EFAULT;
        }
        buf[cnt] = '\0';

        err = apply_subsystem_event_filter(system, buf);
        free_page((unsigned long) buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

        kfree(s);

        return r;
}

static const struct seq_operations show_event_seq_ops = {
        .start = t_start,
        .next = t_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
        .start = s_start,
        .next = s_next,
        .show = t_show,
        .stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
        .open = ftrace_event_seq_open,
        .read = seq_read,
        .write = ftrace_event_write,
        .llseek = seq_lseek,
        .release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
        .open = tracing_open_generic,
        .read = event_enable_read,
        .write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
        .open = tracing_open_generic,
        .read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
        .open = tracing_open_generic,
        .read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_generic,
        .read = event_filter_read,
        .write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
        .open = tracing_open_generic,
        .read = subsystem_filter_read,
        .write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
        .open = tracing_open_generic,
        .read = system_enable_read,
        .write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
        .open = tracing_open_generic,
        .read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
        static struct dentry *d_tracer;
        static struct dentry *d_events;

        if (d_events)
                return d_events;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return NULL;

        d_events = debugfs_create_dir("events", d_tracer);
        if (!d_events)
                pr_warning("Could not create debugfs "
                           "'events' directory\n");

        return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
        struct event_subsystem *system;
        struct dentry *entry;

        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0)
                        return system->entry;
        }

        /* need to create new entry */
        system = kmalloc(sizeof(*system), GFP_KERNEL);
        if (!system) {
                pr_warning("No memory to create event subsystem %s\n",
                           name);
                return d_events;
        }

        system->entry = debugfs_create_dir(name, d_events);
        if (!system->entry) {
                pr_warning("Could not create event subsystem %s\n",
                           name);
                kfree(system);
                return d_events;
        }

        system->name = kstrdup(name, GFP_KERNEL);
        if (!system->name) {
                debugfs_remove(system->entry);
                kfree(system);
                return d_events;
        }

        list_add(&system->list, &event_subsystems);

        system->filter = NULL;

        system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
        if (!system->filter) {
                pr_warning("Could not allocate filter for subsystem "
                           "'%s'\n", name);
                return system->entry;
        }

        entry = debugfs_create_file("filter", 0644, system->entry, system,
                                    &ftrace_subsystem_filter_fops);
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
                pr_warning("Could not create debugfs "
                           "'%s/filter' entry\n", name);
        }

        entry = trace_create_file("enable", 0644, system->entry,
                                  (void *)system->name,
                                  &ftrace_system_enable_fops);

        return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 const struct file_operations *id,
                 const struct file_operations *enable,
                 const struct file_operations *filter,
                 const struct file_operations *format)
{
        struct dentry *entry;
        int ret;

        /*
         * If the trace point header did not define TRACE_SYSTEM
         * then the system would be called "TRACE_SYSTEM".
         */
        if (strcmp(call->system, TRACE_SYSTEM) != 0)
                d_events = event_subsystem_dir(call->system, d_events);

        if (call->raw_init) {
                ret = call->raw_init();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
        }

        call->dir = debugfs_create_dir(call->name, d_events);
        if (!call->dir) {
                pr_warning("Could not create debugfs "
                           "'%s' directory\n", call->name);
                return -1;
        }

        if (call->regfunc)
                entry = trace_create_file("enable", 0644, call->dir, call,
                                          enable);

        if (call->id)
                entry = trace_create_file("id", 0444, call->dir, call,
                                          id);

        if (call->define_fields) {
                ret = call->define_fields();
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
                                   " events/%s\n", call->name);
                        return ret;
                }
                entry = trace_create_file("filter", 0644, call->dir, call,
                                          filter);
        }

        /* A trace may not want to export its format */
        if (!call->show_format)
                return 0;

        entry = trace_create_file("format", 0444, call->dir, call,
                                  format);

        return 0;
}

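/*
 * Sketch of the debugfs layout built by the two functions above, with
 * <system> and <event> as placeholders:
 *
 *	tracing/events/<system>/filter
 *	tracing/events/<system>/enable
 *	tracing/events/<system>/<event>/enable	(only if the event has a regfunc)
 *	tracing/events/<system>/<event>/id	(only if the event has an id)
 *	tracing/events/<system>/<event>/filter	(only if it defines fields)
 *	tracing/events/<system>/<event>/format	(only if it exports a format)
 */
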
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
        struct list_head		list;
        struct module			*mod;
        struct file_operations		id;
        struct file_operations		enable;
        struct file_operations		format;
        struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;

        /*
         * This is a bit of a PITA. To allow for correct reference
         * counting, modules must "own" their file_operations.
         * To do this, we allocate the file operations that will be
         * used in the event directory.
         */

        file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
        if (!file_ops)
                return NULL;

        file_ops->mod = mod;

        file_ops->id = ftrace_event_id_fops;
        file_ops->id.owner = mod;

        file_ops->enable = ftrace_enable_fops;
        file_ops->enable.owner = mod;

        file_ops->filter = ftrace_event_filter_fops;
        file_ops->filter.owner = mod;

        file_ops->format = ftrace_event_format_fops;
        file_ops->format.owner = mod;

        list_add(&file_ops->list, &ftrace_module_file_list);

        return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops = NULL;
        struct ftrace_event_call *call, *start, *end;
        struct dentry *d_events;

        start = mod->trace_events;
        end = mod->trace_events + mod->num_trace_events;

        if (start == end)
                return;

        d_events = event_trace_events_dir();
        if (!d_events)
                return;

        for_each_event(call, start, end) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;

                /*
                 * This module has events, create file ops for this module
                 * if not already done.
                 */
                if (!file_ops) {
                        file_ops = trace_create_file_ops(mod);
                        if (!file_ops)
                                return;
                }
                call->mod = mod;
                list_add(&call->list, &ftrace_events);
                event_create_dir(call, d_events,
                                 &file_ops->id, &file_ops->enable,
                                 &file_ops->filter, &file_ops->format);
        }
}

static void trace_module_remove_events(struct module *mod)
{
        struct ftrace_module_file_ops *file_ops;
        struct ftrace_event_call *call, *p;
        bool found = false;

        list_for_each_entry_safe(call, p, &ftrace_events, list) {
                if (call->mod == mod) {
                        found = true;
                        if (call->enabled) {
                                call->enabled = 0;
                                call->unregfunc();
                        }
                        if (call->event)
                                unregister_ftrace_event(call->event);
                        debugfs_remove_recursive(call->dir);
                        list_del(&call->list);
                        trace_destroy_fields(call);
                        destroy_preds(call);
                }
        }

        /* Now free the file_operations */
        list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
                if (file_ops->mod == mod)
                        break;
        }
        if (&file_ops->list != &ftrace_module_file_list) {
                list_del(&file_ops->list);
                kfree(file_ops);
        }

        /*
         * It is safest to reset the ring buffer if the module being unloaded
         * registered any events.
         */
        if (found)
                tracing_reset_current_online_cpus();
}

static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        struct module *mod = data;

        mutex_lock(&event_mutex);
        switch (val) {
        case MODULE_STATE_COMING:
                trace_module_add_events(mod);
                break;
        case MODULE_STATE_GOING:
                trace_module_remove_events(mod);
                break;
        }
        mutex_unlock(&event_mutex);

        return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
                               unsigned long val, void *data)
{
        return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
        .notifier_call = trace_module_notify,
        .priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static __init int event_trace_init(void)
{
        struct ftrace_event_call *call;
        struct dentry *d_tracer;
        struct dentry *entry;
        struct dentry *d_events;
        int ret;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    (void *)&show_event_seq_ops,
                                    &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_events' entry\n");

        entry = debugfs_create_file("set_event", 0644, d_tracer,
                                    (void *)&show_set_event_seq_ops,
                                    &ftrace_set_event_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_event' entry\n");

        d_events = event_trace_events_dir();
        if (!d_events)
                return 0;

        /* ring buffer internal formats */
        trace_create_file("header_page", 0444, d_events,
                          ring_buffer_print_page_header,
                          &ftrace_show_header_fops);

        trace_create_file("header_event", 0444, d_events,
                          ring_buffer_print_entry_header,
                          &ftrace_show_header_fops);

        trace_create_file("enable", 0644, d_events,
                          NULL, &ftrace_system_enable_fops);

        for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
                /* The linker may leave blanks */
                if (!call->name)
                        continue;
                list_add(&call->list, &ftrace_events);
                event_create_dir(call, d_events, &ftrace_event_id_fops,
                                 &ftrace_enable_fops, &ftrace_event_filter_fops,
                                 &ftrace_event_format_fops);
        }

        /* register_module_notifier() returns 0 on success, so warn on nonzero */
        ret = register_module_notifier(&trace_module_nb);
        if (ret)
                pr_warning("Failed to register trace events module notifier\n");

        return 0;
}
fs_initcall(event_trace_init);

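/*
 * Besides the per-event directories, event_trace_init() above creates
 * these top-level control files:
 *
 *	tracing/available_events	lists every registered event
 *	tracing/set_event		write event names to enable/disable them
 *	tracing/events/header_page	ring buffer page header format
 *	tracing/events/header_event	ring buffer event header format
 *	tracing/events/enable		enable/disable all events at once
 */
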
#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
        spin_lock(&test_spinlock);
        spin_lock_irq(&test_spinlock_irq);
        udelay(1);
        spin_unlock_irq(&test_spinlock_irq);
        spin_unlock(&test_spinlock);

        mutex_lock(&test_mutex);
        msleep(1);
        mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
        void *test_malloc;

        test_malloc = kmalloc(1234, GFP_KERNEL);
        if (!test_malloc)
                pr_info("failed to kmalloc\n");

        schedule_on_each_cpu(test_work);

        kfree(test_malloc);

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop())
                schedule();

        return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
        struct task_struct *test_thread;

        test_thread = kthread_run(event_test_thread, NULL, "test-events");
        msleep(1);
        kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
        struct ftrace_event_call *call;
        struct event_subsystem *system;
        int ret;

        pr_info("Running tests on trace events:\n");

        list_for_each_entry(call, &ftrace_events, list) {

                /* Only test those that have a regfunc */
                if (!call->regfunc)
                        continue;

                pr_info("Testing event %s: ", call->name);

                /*
                 * If an event is already enabled, someone is using
                 * it and the self test should not be on.
                 */
                if (call->enabled) {
                        pr_warning("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }

                call->enabled = 1;
                call->regfunc();

                event_test_stuff();

                call->unregfunc();
                call->enabled = 0;

                pr_cont("OK\n");
        }

        /* Now test at the sub system level */

        pr_info("Running tests on trace event systems:\n");

        list_for_each_entry(system, &event_subsystems, list) {

                /* the ftrace system is special, skip it */
                if (strcmp(system->name, "ftrace") == 0)
                        continue;

                pr_info("Testing event system %s: ", system->name);

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
                        pr_warning("error enabling system %s\n",
                                   system->name);
                        continue;
                }

                event_test_stuff();

                ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret))
                        pr_warning("error disabling system %s\n",
                                   system->name);

                pr_cont("OK\n");
        }

        /* Test with all events enabled */

        pr_info("Running tests on all trace events:\n");
        pr_info("Testing all events: ");

        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error enabling all events\n");
                return;
        }

        event_test_stuff();

        /* reset sysname */
        ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
                pr_warning("error disabling all events\n");
                return;
        }

        pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
        struct ring_buffer_event *event;
        struct ftrace_entry *entry;
        unsigned long flags;
        long disabled;
        int resched;
        int cpu;
        int pc;

        pc = preempt_count();
        resched = ftrace_preempt_disable();
        cpu = raw_smp_processor_id();
        disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

        if (disabled != 1)
                goto out;

        local_save_flags(flags);

        event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
                                                  flags, pc);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
        entry->ip = ip;
        entry->parent_ip = parent_ip;

        trace_nowake_buffer_unlock_commit(event, flags, pc);

 out:
        atomic_dec(&per_cpu(test_event_disable, cpu));
        ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata  =
{
        .func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
        register_ftrace_function(&trace_ops);
        pr_info("Running tests again, along with the function tracer\n");
        event_trace_self_tests();
        unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
        event_trace_self_tests();
        event_trace_self_test_with_function();

        return 0;
}

late_initcall(event_trace_self_tests_init);

#endif