/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);
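
/*
 * A caller (typically generated TRACE_EVENT code) would define a field
 * roughly like this; "struct my_entry" and the field values are purely
 * illustrative:
 *
 *	trace_define_field(call, "pid_t", "pid",
 *			   offsetof(struct my_entry, pid),
 *			   sizeof(pid_t), is_signed_type(pid_t),
 *			   FILTER_OTHER);
 */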

#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);
	__common_field(int, lock_depth);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);

void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call);
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc(call);
		}
		break;
	}
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}
	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
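
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo sched:sched_switch >  /sys/kernel/debug/tracing/set_event
 *	echo 'irq:*'            >> /sys/kernel/debug/tracing/set_event
 *	echo '!sched:*'         >> /sys/kernel/debug/tracing/set_event
 *
 * The first line enables one event, the second every event in the irq
 * subsystem, and the third (the '!' prefix, stripped in
 * ftrace_event_write() below) disables all sched events again.
 */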

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
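
/*
 * For example, kernel code could enable sched_wakeup recording with:
 *
 *	trace_set_clr_event("sched", "sched_wakeup", 1);
 *
 * (illustrative call only; any registered system/event pair works).
 */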

/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)	/* cnt is a size_t; a "cnt < 0" check would be dead code */
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (trace_parser_loaded(&parser)) {
		int set = 1;

		if (*parser.buffer == '!')
			set = 0;

		parser.buffer[parser.idx] = 0;

		ret = ftrace_set_clr_event(parser.buffer + !set, set);
		if (ret)
			goto out_put;
	}

	ret = read;

out_put:
	trace_parser_put(&parser);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			return call;
	}

	return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	/*
	 * Use the list head itself as a dummy entry, so that t_next()'s
	 * list_for_each_entry_continue() starts at the first real event.
	 */
	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = t_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_event_call *call = v;

	(*pos)++;

	list_for_each_entry_continue(call, &ftrace_events, list) {
		if (call->enabled)
			return call;
	}

	return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call;
	loff_t l;

	mutex_lock(&event_mutex);

	/* As in t_start(): dummy entry at the list head for s_next() */
	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
	for (l = 0; l <= *pos; ) {
		call = s_next(m, call, &l);
		if (!call)
			break;
	}

	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
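
/*
 * These handlers back the per-event "enable" file created in
 * event_create_dir() below, e.g. (illustrative path, assuming debugfs
 * at /sys/kernel/debug):
 *
 *	echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 *	cat /sys/kernel/debug/tracing/events/sched/sched_switch/enable
 */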

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	/*
	 * Index is a 2-bit mask built below: bit 0 means "some event
	 * disabled", bit 1 means "some event enabled".  So 1 -> '0'
	 * (all disabled), 2 -> '1' (all enabled), 3 -> 'X' (mixed).
	 */
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set
		 * or if all events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
		sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid),
				FIELD(int, lock_depth));
}
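
/*
 * On a typical configuration the header written above looks roughly
 * like this (exact offsets and sizes depend on the architecture and
 * struct trace_entry layout):
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 *	field:int common_lock_depth;	offset:8;	size:4;
 */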

static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
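
/*
 * Filter usage sketch (field names come from the event's "format"
 * file; paths are relative to the tracing directory):
 *
 *	echo 'common_pid == 42' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter	# clear the filter
 */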

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id && call->profile_enable)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
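
/*
 * The resulting layout under the tracing "events" directory is roughly:
 *
 *	events/<subsystem>/<event>/{enable,id,filter,format}
 *
 * plus the per-subsystem enable/filter files created in
 * event_subsystem_dir() above.
 */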

static int __trace_add_event_call(struct ftrace_event_call *call)
{
	struct dentry *d_events;
	int ret;

	if (!call->name)
		return -EINVAL;

	if (call->raw_init) {
		ret = call->raw_init(call);
		if (ret < 0) {
			if (ret != -ENOSYS)
				pr_warning("Could not initialize trace "
					   "events/%s\n", call->name);
			return ret;
		}
	}

	d_events = event_trace_events_dir();
	if (!d_events)
		return -ENOENT;

	ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
			       &ftrace_enable_fops, &ftrace_event_filter_fops,
			       &ftrace_event_format_fops);
	if (!ret)
		list_add(&call->list, &ftrace_events);

	return ret;
}

/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
{
	int ret;

	mutex_lock(&event_mutex);
	ret = __trace_add_event_call(call);
	mutex_unlock(&event_mutex);

	return ret;
}

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

/*
 * Must be called with both event_mutex and trace_event_mutex held.
 */
static void __trace_remove_event_call(struct ftrace_event_call *call)
{
	ftrace_event_enable_disable(call, 0);
	if (call->event)
		__unregister_ftrace_event(call->event);
	debugfs_remove_recursive(call->dir);
	list_del(&call->list);
	trace_destroy_fields(call);
	destroy_preds(call);
	remove_subsystem_dir(call->system);
}

/* Remove an event_call */
void trace_remove_event_call(struct ftrace_event_call *call)
{
	mutex_lock(&event_mutex);
	down_write(&trace_event_mutex);
	__trace_remove_event_call(call);
	up_write(&trace_event_mutex);
	mutex_unlock(&event_mutex);
}

#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)
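
/*
 * for_each_event() walks an array of ftrace_event_call structures laid
 * out contiguously by the linker: the built-in events between
 * __start_ftrace_events and __stop_ftrace_events, or a module's
 * mod->trace_events array.
 */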

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		ret = event_create_dir(call, d_events,
				       &file_ops->id, &file_ops->enable,
				       &file_ops->filter, &file_ops->format);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			__trace_remove_event_call(call);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
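
/*
 * Boot-time usage sketch: events named on the kernel command line are
 * enabled as soon as event_trace_init() runs, e.g.
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */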

static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;
		if (call->raw_init) {
			ret = call->raw_init(call);
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}
		ret = event_create_dir(call, d_events, &ftrace_event_id_fops,
				       &ftrace_enable_fops,
				       &ftrace_event_filter_fops,
				       &ftrace_event_format_fops);
		if (!ret)
			list_add(&call->list, &ftrace_events);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		/*
		 * Testing syscall events here is pretty useless, but
		 * we still do it if configured. But this is time consuming.
		 * What we really need is a user thread to perform the
		 * syscalls as we test.
		 */
#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
		if (call->system &&
		    strcmp(call->system, "syscalls") == 0)
			continue;
#endif

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();

	/* The per-cpu counter guards against recursing into this callback */
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}
#else
static __init void event_trace_self_test_with_function(void)
{
}
#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif /* CONFIG_FTRACE_STARTUP_TEST */