trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/delay.h>

#include <asm/setup.h>

#include "trace_output.h"

#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);

int trace_define_field(struct ftrace_event_call *call, const char *type,
		       const char *name, int offset, int size, int is_signed,
		       int filter_type)
{
	struct ftrace_event_field *field;

	field = kzalloc(sizeof(*field), GFP_KERNEL);
	if (!field)
		goto err;

	field->name = kstrdup(name, GFP_KERNEL);
	if (!field->name)
		goto err;

	field->type = kstrdup(type, GFP_KERNEL);
	if (!field->type)
		goto err;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);
	else
		field->filter_type = filter_type;

	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;

	list_add(&field->link, &call->fields);

	return 0;

err:
	if (field) {
		kfree(field->name);
		kfree(field->type);
	}
	kfree(field);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __common_field(type, item)					\
	ret = trace_define_field(call, #type, "common_" #item,		\
				 offsetof(typeof(ent), item),		\
				 sizeof(ent.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

int trace_define_common_fields(struct ftrace_event_call *call)
{
	int ret;
	struct trace_entry ent;

	__common_field(unsigned short, type);
	__common_field(unsigned char, flags);
	__common_field(unsigned char, preempt_count);
	__common_field(int, pid);

	return ret;
}
EXPORT_SYMBOL_GPL(trace_define_common_fields);
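
/*
 * Illustrative sketch of how an event's define_fields() callback uses the
 * helpers above.  "struct my_event_entry" and its "ip" field are hypothetical
 * names, not an event defined in this file:
 *
 *	static int my_event_define_fields(struct ftrace_event_call *call)
 *	{
 *		struct my_event_entry field;
 *		int ret;
 *
 *		ret = trace_define_common_fields(call);
 *		if (ret)
 *			return ret;
 *
 *		return trace_define_field(call, "unsigned long", "ip",
 *					  offsetof(typeof(field), ip),
 *					  sizeof(field.ip), 0, FILTER_OTHER);
 *	}
 */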
#ifdef CONFIG_MODULES

static void trace_destroy_fields(struct ftrace_event_call *call)
{
	struct ftrace_event_field *field, *next;

	list_for_each_entry_safe(field, next, &call->fields, link) {
		list_del(&field->link);
		kfree(field->type);
		kfree(field->name);
		kfree(field);
	}
}

#endif /* CONFIG_MODULES */

static void ftrace_event_enable_disable(struct ftrace_event_call *call,
					int enable)
{
	switch (enable) {
	case 0:
		if (call->enabled) {
			call->enabled = 0;
			tracing_stop_cmdline_record();
			call->unregfunc(call->data);
		}
		break;
	case 1:
		if (!call->enabled) {
			call->enabled = 1;
			tracing_start_cmdline_record();
			call->regfunc(call->data);
		}
		break;
	}
}

static void ftrace_clear_events(void)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);

	list_for_each_entry(call, &ftrace_events, list) {
		ftrace_event_enable_disable(call, 0);
	}

	mutex_unlock(&event_mutex);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int __ftrace_set_clr_event(const char *match, const char *sub,
				  const char *event, int set)
{
	struct ftrace_event_call *call;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {

		if (!call->name || !call->regfunc)
			continue;

		if (match &&
		    strcmp(match, call->name) != 0 &&
		    strcmp(match, call->system) != 0)
			continue;

		if (sub && strcmp(sub, call->system) != 0)
			continue;

		if (event && strcmp(event, call->name) != 0)
			continue;

		ftrace_event_enable_disable(call, set);

		ret = 0;
	}
	mutex_unlock(&event_mutex);

	return ret;
}

static int ftrace_set_clr_event(char *buf, int set)
{
	char *event = NULL, *sub = NULL, *match;

	/*
	 * The buf format can be <subsystem>:<event-name>
	 *  *:<event-name> means any event by that name.
	 *  :<event-name> is the same.
	 *
	 *  <subsystem>:* means all events in that subsystem
	 *  <subsystem>: means the same.
	 *
	 *  <name> (no ':') means all events in a subsystem with
	 *  the name <name> or any event that matches <name>
	 */

	match = strsep(&buf, ":");
	if (buf) {
		sub = match;
		event = buf;
		match = NULL;

		if (!strlen(sub) || strcmp(sub, "*") == 0)
			sub = NULL;
		if (!strlen(event) || strcmp(event, "*") == 0)
			event = NULL;
	}

	return __ftrace_set_clr_event(match, sub, event, set);
}
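
/*
 * Examples of the accepted formats, written through the debugfs "set_event"
 * file (assuming debugfs is mounted on /sys/kernel/debug and the sched
 * events are available):
 *
 *	echo sched:sched_wakeup > /sys/kernel/debug/tracing/set_event
 *	echo 'sched:*'          > /sys/kernel/debug/tracing/set_event
 *	echo sched_wakeup       > /sys/kernel/debug/tracing/set_event
 *
 * A leading '!' (stripped by ftrace_event_write() below) clears the matching
 * events instead:
 *
 *	echo '!sched:sched_wakeup' >> /sys/kernel/debug/tracing/set_event
 */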
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
	return __ftrace_set_clr_event(NULL, system, event, set);
}
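
/*
 * Minimal sketch of an in-kernel caller; the sched:sched_switch event is
 * assumed to be registered:
 *
 *	ret = trace_set_clr_event("sched", "sched_switch", 1);
 *	if (ret)
 *		pr_warning("could not enable sched_switch events\n");
 */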
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE		127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	size_t read = 0;
	int i, set = 1;
	ssize_t ret;
	char *buf;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = get_user(ch, ubuf++);
	if (ret)
		return ret;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			return ret;
		read++;
		cnt--;
	}

	/* Only white space found? */
	if (isspace(ch)) {
		file->f_pos += read;
		ret = read;
		return ret;
	}

	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (cnt > EVENT_BUF_SIZE)
		cnt = EVENT_BUF_SIZE;

	i = 0;
	while (cnt && !isspace(ch)) {
		if (!i && ch == '!')
			set = 0;
		else
			buf[i++] = ch;

		ret = get_user(ch, ubuf++);
		if (ret)
			goto out_free;
		read++;
		cnt--;
	}
	buf[i] = 0;

	file->f_pos += read;

	ret = ftrace_set_clr_event(buf, set);
	if (ret)
		goto out_free;

	ret = read;

 out_free:
	kfree(buf);

	return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

	for (;;) {
		if (list == &ftrace_events)
			return NULL;

		call = list_entry(list, struct ftrace_event_call, list);

		/*
		 * The ftrace subsystem is for showing formats only.
		 * They can not be enabled or disabled via the event files.
		 */
		if (call->regfunc)
			break;

		list = list->next;
	}

	m->private = list->next;

	return call;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = t_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct list_head *list = m->private;
	struct ftrace_event_call *call;

	(*pos)++;

 retry:
	if (list == &ftrace_events)
		return NULL;

	call = list_entry(list, struct ftrace_event_call, list);

	if (!call->enabled) {
		list = list->next;
		goto retry;
	}

	m->private = list->next;

	return call;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_event_call *call = NULL;
	loff_t l;

	mutex_lock(&event_mutex);

	m->private = ftrace_events.next;
	for (l = 0; l <= *pos; ) {
		call = s_next(m, NULL, &l);
		if (!call)
			break;
	}
	return call;
}

static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_event_call *call = v;

	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		seq_printf(m, "%s:", call->system);
	seq_printf(m, "%s\n", call->name);

	return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&event_mutex);
}

static int
ftrace_event_seq_open(struct inode *inode, struct file *file)
{
	const struct seq_operations *seq_ops;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_clear_events();

	seq_ops = inode->i_private;
	return seq_open(file, seq_ops);
}
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;

	if (call->enabled)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char buf[64];
	unsigned long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	switch (val) {
	case 0:
	case 1:
		mutex_lock(&event_mutex);
		ftrace_event_enable_disable(call, val);
		mutex_unlock(&event_mutex);
		break;

	default:
		return -EINVAL;
	}

	*ppos += cnt;

	return cnt;
}
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	const char set_to_char[4] = { '?', '0', '1', 'X' };
	const char *system = filp->private_data;
	struct ftrace_event_call *call;
	char buf[2];
	int set = 0;
	int ret;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!call->name || !call->regfunc)
			continue;

		if (system && strcmp(call->system, system) != 0)
			continue;

		/*
		 * We need to find out if all the events are set,
		 * if all the events are cleared, or if we have
		 * a mixture.
		 */
		set |= (1 << !!call->enabled);

		/*
		 * If we have a mixture, no need to look further.
		 */
		if (set == 3)
			break;
	}
	mutex_unlock(&event_mutex);

	buf[0] = set_to_char[set];
	buf[1] = '\n';

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

	return ret;
}
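
/*
 * The "set" bitmask built above maps onto set_to_char[] as follows:
 *
 *	no matching events		-> 0 -> '?'
 *	all matching events disabled	-> 1 -> '0'
 *	all matching events enabled	-> 2 -> '1'
 *	a mixture			-> 3 -> 'X'
 */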
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	const char *system = filp->private_data;
	unsigned long val;
	char buf[64];
	ssize_t ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	ret = __ftrace_set_clr_event(NULL, system, NULL, val);
	if (ret)
		goto out;

	ret = cnt;

out:
	*ppos += cnt;

	return ret;
}

extern char *__bad_type_size(void);

#undef FIELD
#define FIELD(type, name)						\
	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
	#type, "common_" #name, offsetof(typeof(field), name),		\
	sizeof(field.name)

static int trace_write_header(struct trace_seq *s)
{
	struct trace_entry field;

	/* struct trace_entry */
	return trace_seq_printf(s,
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
				"\n",
				FIELD(unsigned short, type),
				FIELD(unsigned char, flags),
				FIELD(unsigned char, preempt_count),
				FIELD(int, pid));
}
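
/*
 * On a typical configuration this produces the common header seen at the
 * top of every event's "format" file, roughly:
 *
 *	field:unsigned short common_type;	offset:0;	size:2;
 *	field:unsigned char common_flags;	offset:2;	size:1;
 *	field:unsigned char common_preempt_count;	offset:3;	size:1;
 *	field:int common_pid;	offset:4;	size:4;
 *
 * The offsets and sizes come straight from struct trace_entry, so the exact
 * numbers depend on its layout.
 */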
static ssize_t
event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	char *buf;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	/* If any of the first writes fail, so will the show_format. */

	trace_seq_printf(s, "name: %s\n", call->name);
	trace_seq_printf(s, "ID: %d\n", call->id);
	trace_seq_printf(s, "format:\n");
	trace_write_header(s);

	r = call->show_format(call, s);
	if (!r) {
		/*
		 * ug! The format output is bigger than a PAGE!!
		 */
		buf = "FORMAT TOO BIG\n";
		r = simple_read_from_buffer(ubuf, cnt, ppos,
					    buf, strlen(buf));
		goto out;
	}

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
 out:
	kfree(s);
	return r;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);
	trace_seq_printf(s, "%d\n", call->id);

	r = simple_read_from_buffer(ubuf, cnt, ppos,
				    s->buffer, s->len);
	kfree(s);
	return r;
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_event_filter(call, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct ftrace_event_call *call = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_event_filter(call, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}
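
/*
 * Filter expressions are written through each event's "filter" file,
 * relative to the tracing debugfs directory; writing "0" clears the filter.
 * For example, assuming the sched:sched_switch event is present:
 *
 *	echo 'common_pid == 1' > events/sched/sched_switch/filter
 *	echo 0 > events/sched/sched_switch/filter
 */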
static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
		      loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	print_subsystem_event_filter(system, s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct event_subsystem *system = filp->private_data;
	char *buf;
	int err;

	if (cnt >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_TEMPORARY);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	buf[cnt] = '\0';

	err = apply_subsystem_event_filter(system, buf);
	free_page((unsigned long) buf);
	if (err < 0)
		return err;

	*ppos += cnt;

	return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
	int (*func)(struct trace_seq *s) = filp->private_data;
	struct trace_seq *s;
	int r;

	if (*ppos)
		return 0;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	func(s);
	r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

	kfree(s);

	return r;
}

static const struct seq_operations show_event_seq_ops = {
	.start = t_start,
	.next = t_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct seq_operations show_set_event_seq_ops = {
	.start = s_start,
	.next = s_next,
	.show = t_show,
	.stop = t_stop,
};

static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_set_event_fops = {
	.open = ftrace_event_seq_open,
	.read = seq_read,
	.write = ftrace_event_write,
	.llseek = seq_lseek,
	.release = seq_release,
};

static const struct file_operations ftrace_enable_fops = {
	.open = tracing_open_generic,
	.read = event_enable_read,
	.write = event_enable_write,
};

static const struct file_operations ftrace_event_format_fops = {
	.open = tracing_open_generic,
	.read = event_format_read,
};

static const struct file_operations ftrace_event_id_fops = {
	.open = tracing_open_generic,
	.read = event_id_read,
};

static const struct file_operations ftrace_event_filter_fops = {
	.open = tracing_open_generic,
	.read = event_filter_read,
	.write = event_filter_write,
};

static const struct file_operations ftrace_subsystem_filter_fops = {
	.open = tracing_open_generic,
	.read = subsystem_filter_read,
	.write = subsystem_filter_write,
};

static const struct file_operations ftrace_system_enable_fops = {
	.open = tracing_open_generic,
	.read = system_enable_read,
	.write = system_enable_write,
};

static const struct file_operations ftrace_show_header_fops = {
	.open = tracing_open_generic,
	.read = show_header,
};

static struct dentry *event_trace_events_dir(void)
{
	static struct dentry *d_tracer;
	static struct dentry *d_events;

	if (d_events)
		return d_events;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return NULL;

	d_events = debugfs_create_dir("events", d_tracer);
	if (!d_events)
		pr_warning("Could not create debugfs "
			   "'events' directory\n");

	return d_events;
}

static LIST_HEAD(event_subsystems);

static struct dentry *
event_subsystem_dir(const char *name, struct dentry *d_events)
{
	struct event_subsystem *system;
	struct dentry *entry;

	/* First see if we did not already create this dir */
	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			system->nr_events++;
			return system->entry;
		}
	}

	/* need to create new entry */
	system = kmalloc(sizeof(*system), GFP_KERNEL);
	if (!system) {
		pr_warning("No memory to create event subsystem %s\n",
			   name);
		return d_events;
	}

	system->entry = debugfs_create_dir(name, d_events);
	if (!system->entry) {
		pr_warning("Could not create event subsystem %s\n",
			   name);
		kfree(system);
		return d_events;
	}

	system->nr_events = 1;
	system->name = kstrdup(name, GFP_KERNEL);
	if (!system->name) {
		debugfs_remove(system->entry);
		kfree(system);
		return d_events;
	}

	list_add(&system->list, &event_subsystems);

	system->filter = NULL;

	system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
	if (!system->filter) {
		pr_warning("Could not allocate filter for subsystem "
			   "'%s'\n", name);
		return system->entry;
	}

	entry = debugfs_create_file("filter", 0644, system->entry, system,
				    &ftrace_subsystem_filter_fops);
	if (!entry) {
		kfree(system->filter);
		system->filter = NULL;
		pr_warning("Could not create debugfs "
			   "'%s/filter' entry\n", name);
	}

	entry = trace_create_file("enable", 0644, system->entry,
				  (void *)system->name,
				  &ftrace_system_enable_fops);

	return system->entry;
}

static int
event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
		 const struct file_operations *id,
		 const struct file_operations *enable,
		 const struct file_operations *filter,
		 const struct file_operations *format)
{
	struct dentry *entry;
	int ret;

	/*
	 * If the trace point header did not define TRACE_SYSTEM
	 * then the system would be called "TRACE_SYSTEM".
	 */
	if (strcmp(call->system, TRACE_SYSTEM) != 0)
		d_events = event_subsystem_dir(call->system, d_events);

	call->dir = debugfs_create_dir(call->name, d_events);
	if (!call->dir) {
		pr_warning("Could not create debugfs "
			   "'%s' directory\n", call->name);
		return -1;
	}

	if (call->regfunc)
		entry = trace_create_file("enable", 0644, call->dir, call,
					  enable);

	if (call->id && call->profile_enable)
		entry = trace_create_file("id", 0444, call->dir, call,
					  id);

	if (call->define_fields) {
		ret = call->define_fields(call);
		if (ret < 0) {
			pr_warning("Could not initialize trace point"
				   " events/%s\n", call->name);
			return ret;
		}
		entry = trace_create_file("filter", 0644, call->dir, call,
					  filter);
	}

	/* A trace may not want to export its format */
	if (!call->show_format)
		return 0;

	entry = trace_create_file("format", 0444, call->dir, call,
				  format);

	return 0;
}
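
/*
 * For a hypothetical event "foo" in subsystem "bar", the directories and
 * files created above look like this under the tracing debugfs directory
 * (entries appear only when the corresponding callbacks exist):
 *
 *	events/bar/enable
 *	events/bar/filter
 *	events/bar/foo/enable
 *	events/bar/foo/id
 *	events/bar/foo/filter
 *	events/bar/foo/format
 */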
#define for_each_event(event, start, end)			\
	for (event = start;					\
	     (unsigned long)event < (unsigned long)end;		\
	     event++)

#ifdef CONFIG_MODULES

static LIST_HEAD(ftrace_module_file_list);

/*
 * Modules must own their file_operations to keep up with
 * reference counting.
 */
struct ftrace_module_file_ops {
	struct list_head		list;
	struct module			*mod;
	struct file_operations		id;
	struct file_operations		enable;
	struct file_operations		format;
	struct file_operations		filter;
};

static void remove_subsystem_dir(const char *name)
{
	struct event_subsystem *system;

	if (strcmp(name, TRACE_SYSTEM) == 0)
		return;

	list_for_each_entry(system, &event_subsystems, list) {
		if (strcmp(system->name, name) == 0) {
			if (!--system->nr_events) {
				struct event_filter *filter = system->filter;

				debugfs_remove_recursive(system->entry);
				list_del(&system->list);
				if (filter) {
					kfree(filter->filter_string);
					kfree(filter);
				}
				kfree(system->name);
				kfree(system);
			}
			break;
		}
	}
}

static struct ftrace_module_file_ops *
trace_create_file_ops(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;

	/*
	 * This is a bit of a PITA. To allow for correct reference
	 * counting, modules must "own" their file_operations.
	 * To do this, we allocate the file operations that will be
	 * used in the event directory.
	 */

	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
	if (!file_ops)
		return NULL;

	file_ops->mod = mod;

	file_ops->id = ftrace_event_id_fops;
	file_ops->id.owner = mod;

	file_ops->enable = ftrace_enable_fops;
	file_ops->enable.owner = mod;

	file_ops->filter = ftrace_event_filter_fops;
	file_ops->filter.owner = mod;

	file_ops->format = ftrace_event_format_fops;
	file_ops->format.owner = mod;

	list_add(&file_ops->list, &ftrace_module_file_list);

	return file_ops;
}

static void trace_module_add_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops = NULL;
	struct ftrace_event_call *call, *start, *end;
	struct dentry *d_events;
	int ret;

	start = mod->trace_events;
	end = mod->trace_events + mod->num_trace_events;

	if (start == end)
		return;

	d_events = event_trace_events_dir();
	if (!d_events)
		return;

	for_each_event(call, start, end) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}

		/*
		 * This module has events, create file ops for this module
		 * if not already done.
		 */
		if (!file_ops) {
			file_ops = trace_create_file_ops(mod);
			if (!file_ops)
				return;
		}
		call->mod = mod;
		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events,
				 &file_ops->id, &file_ops->enable,
				 &file_ops->filter, &file_ops->format);
	}
}

static void trace_module_remove_events(struct module *mod)
{
	struct ftrace_module_file_ops *file_ops;
	struct ftrace_event_call *call, *p;
	bool found = false;

	down_write(&trace_event_mutex);
	list_for_each_entry_safe(call, p, &ftrace_events, list) {
		if (call->mod == mod) {
			found = true;
			ftrace_event_enable_disable(call, 0);
			if (call->event)
				__unregister_ftrace_event(call->event);
			debugfs_remove_recursive(call->dir);
			list_del(&call->list);
			trace_destroy_fields(call);
			destroy_preds(call);
			remove_subsystem_dir(call->system);
		}
	}

	/* Now free the file_operations */
	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
		if (file_ops->mod == mod)
			break;
	}
	if (&file_ops->list != &ftrace_module_file_list) {
		list_del(&file_ops->list);
		kfree(file_ops);
	}

	/*
	 * It is safest to reset the ring buffer if the module being unloaded
	 * registered any events.
	 */
	if (found)
		tracing_reset_current_online_cpus();
	up_write(&trace_event_mutex);
}

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	mutex_lock(&event_mutex);
	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_events(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_events(mod);
		break;
	}
	mutex_unlock(&event_mutex);

	return 0;
}
#else
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	return 0;
}
#endif /* CONFIG_MODULES */

struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};

extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];

static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;

static __init int setup_trace_event(char *str)
{
	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
	ring_buffer_expanded = 1;
	tracing_selftest_disabled = 1;

	return 1;
}
__setup("trace_event=", setup_trace_event);
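
/*
 * Boot-time usage is a comma-separated list of events, parsed by
 * event_trace_init() below, for example:
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 */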
static __init int event_trace_init(void)
{
	struct ftrace_event_call *call;
	struct dentry *d_tracer;
	struct dentry *entry;
	struct dentry *d_events;
	int ret;
	char *buf = bootup_event_buf;
	char *token;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("available_events", 0444, d_tracer,
				    (void *)&show_event_seq_ops,
				    &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_events' entry\n");

	entry = debugfs_create_file("set_event", 0644, d_tracer,
				    (void *)&show_set_event_seq_ops,
				    &ftrace_set_event_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_event' entry\n");

	d_events = event_trace_events_dir();
	if (!d_events)
		return 0;

	/* ring buffer internal formats */
	trace_create_file("header_page", 0444, d_events,
			  ring_buffer_print_page_header,
			  &ftrace_show_header_fops);

	trace_create_file("header_event", 0444, d_events,
			  ring_buffer_print_entry_header,
			  &ftrace_show_header_fops);

	trace_create_file("enable", 0644, d_events,
			  NULL, &ftrace_system_enable_fops);

	for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
		/* The linker may leave blanks */
		if (!call->name)
			continue;

		if (call->raw_init) {
			ret = call->raw_init();
			if (ret < 0) {
				if (ret != -ENOSYS)
					pr_warning("Could not initialize trace "
						   "point events/%s\n", call->name);
				continue;
			}
		}

		list_add(&call->list, &ftrace_events);
		event_create_dir(call, d_events, &ftrace_event_id_fops,
				 &ftrace_enable_fops, &ftrace_event_filter_fops,
				 &ftrace_event_format_fops);
	}

	while (true) {
		token = strsep(&buf, ",");

		if (!token)
			break;
		if (!*token)
			continue;

		ret = ftrace_set_clr_event(token, 1);
		if (ret)
			pr_warning("Failed to enable trace event: %s\n", token);
	}

	ret = register_module_notifier(&trace_module_nb);
	if (ret)
		pr_warning("Failed to register trace events module notifier\n");

	return 0;
}
fs_initcall(event_trace_init);

#ifdef CONFIG_FTRACE_STARTUP_TEST

static DEFINE_SPINLOCK(test_spinlock);
static DEFINE_SPINLOCK(test_spinlock_irq);
static DEFINE_MUTEX(test_mutex);

static __init void test_work(struct work_struct *dummy)
{
	spin_lock(&test_spinlock);
	spin_lock_irq(&test_spinlock_irq);
	udelay(1);
	spin_unlock_irq(&test_spinlock_irq);
	spin_unlock(&test_spinlock);

	mutex_lock(&test_mutex);
	msleep(1);
	mutex_unlock(&test_mutex);
}

static __init int event_test_thread(void *unused)
{
	void *test_malloc;

	test_malloc = kmalloc(1234, GFP_KERNEL);
	if (!test_malloc)
		pr_info("failed to kmalloc\n");

	schedule_on_each_cpu(test_work);

	kfree(test_malloc);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop())
		schedule();

	return 0;
}

/*
 * Do various things that may trigger events.
 */
static __init void event_test_stuff(void)
{
	struct task_struct *test_thread;

	test_thread = kthread_run(event_test_thread, NULL, "test-events");
	msleep(1);
	kthread_stop(test_thread);
}

/*
 * For every trace event defined, we will test each trace point separately,
 * and then by groups, and finally all trace points.
 */
static __init void event_trace_self_tests(void)
{
	struct ftrace_event_call *call;
	struct event_subsystem *system;
	int ret;

	pr_info("Running tests on trace events:\n");

	list_for_each_entry(call, &ftrace_events, list) {

		/* Only test those that have a regfunc */
		if (!call->regfunc)
			continue;

		pr_info("Testing event %s: ", call->name);

		/*
		 * If an event is already enabled, someone is using
		 * it and the self test should not be on.
		 */
		if (call->enabled) {
			pr_warning("Enabled event during self test!\n");
			WARN_ON_ONCE(1);
			continue;
		}

		ftrace_event_enable_disable(call, 1);
		event_test_stuff();
		ftrace_event_enable_disable(call, 0);

		pr_cont("OK\n");
	}

	/* Now test at the sub system level */

	pr_info("Running tests on trace event systems:\n");

	list_for_each_entry(system, &event_subsystems, list) {

		/* the ftrace system is special, skip it */
		if (strcmp(system->name, "ftrace") == 0)
			continue;

		pr_info("Testing event system %s: ", system->name);

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 1);
		if (WARN_ON_ONCE(ret)) {
			pr_warning("error enabling system %s\n",
				   system->name);
			continue;
		}

		event_test_stuff();

		ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0);
		if (WARN_ON_ONCE(ret))
			pr_warning("error disabling system %s\n",
				   system->name);

		pr_cont("OK\n");
	}

	/* Test with all events enabled */

	pr_info("Running tests on all trace events:\n");
	pr_info("Testing all events: ");

	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 1);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error enabling all events\n");
		return;
	}

	event_test_stuff();

	/* reset sysname */
	ret = __ftrace_set_clr_event(NULL, NULL, NULL, 0);
	if (WARN_ON_ONCE(ret)) {
		pr_warning("error disabling all events\n");
		return;
	}

	pr_cont("OK\n");
}

#ifdef CONFIG_FUNCTION_TRACER

static DEFINE_PER_CPU(atomic_t, test_event_disable);

static void
function_test_events_call(unsigned long ip, unsigned long parent_ip)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct ftrace_entry *entry;
	unsigned long flags;
	long disabled;
	int resched;
	int cpu;
	int pc;

	pc = preempt_count();
	resched = ftrace_preempt_disable();
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));

	if (disabled != 1)
		goto out;

	local_save_flags(flags);

	event = trace_current_buffer_lock_reserve(&buffer,
						  TRACE_FN, sizeof(*entry),
						  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);

 out:
	atomic_dec(&per_cpu(test_event_disable, cpu));
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __initdata =
{
	.func = function_test_events_call,
};

static __init void event_trace_self_test_with_function(void)
{
	register_ftrace_function(&trace_ops);
	pr_info("Running tests again, along with the function tracer\n");
	event_trace_self_tests();
	unregister_ftrace_function(&trace_ops);
}

#else

static __init void event_trace_self_test_with_function(void)
{
}

#endif

static __init int event_trace_self_tests_init(void)
{
	if (!tracing_selftest_disabled) {
		event_trace_self_tests();
		event_trace_self_test_with_function();
	}

	return 0;
}

late_initcall(event_trace_self_tests_init);

#endif