trace_output.c

/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_mutex);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
	int ret;

	ret = seq_write(m, s->buffer, len);

	/*
	 * Only reset this buffer if we successfully wrote to the
	 * seq_file buffer.
	 */
	if (!ret)
		trace_seq_init(s);

	return ret;
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * Returns 0 if the trace would overflow the buffer's free
 * space, 1 otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (s->full || !len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
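
/*
 * Illustrative usage sketch (not part of this file): a tracer's
 * print_line callback typically builds its line with trace_seq_printf()
 * and maps a zero return onto TRACE_TYPE_PARTIAL_LINE so the core can
 * retry once the seq buffer has been flushed. The callback name below
 * is hypothetical.
 *
 *	static enum print_line_t my_print_line(struct trace_iterator *iter)
 *	{
 *		if (!trace_seq_printf(&iter->seq, "cpu=%d ts=%llu\n",
 *				      iter->cpu, iter->ts))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 */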

/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);

int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	s->buffer[s->len++] = c;

	return 1;
}
EXPORT_SYMBOL(trace_seq_putc);

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

	if (s->full)
		return 0;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
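
/*
 * Sketch of the byte ordering above, assuming a little-endian machine:
 * the loop walks @mem from its last byte down to its first, so a 32-bit
 * value 0x12345678 stored in memory as 78 56 34 12 is emitted in the
 * natural reading order, "12345678 ".
 */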

void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
	void *ret;

	if (s->full)
		return NULL;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return NULL;
	}

	ret = s->buffer + s->len;
	s->len += len;

	return ret;
}

int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	s->full = 1;
	return 0;
}

const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;
	int i;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
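
/*
 * Illustrative sketch, assuming @p starts out empty and using a
 * hypothetical flag table:
 *
 *	static const struct trace_print_flags my_flags[] = {
 *		{ 0x10, "WAIT" },
 *		{ 0x40, "IO"   },
 *		{ 0,    NULL   },
 *	};
 *
 * ftrace_print_flags_seq(p, "|", 0x51, my_flags) renders "WAIT|IO|0x1":
 * each named bit is printed and cleared, and the leftover 0x1 falls
 * through to the final hex catch-all.
 */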

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (!p->len)
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
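
/*
 * Illustrative sketch: unlike the flags helper, .mask here holds exact
 * values, not bit masks. Given a hypothetical table
 *
 *	static const struct trace_print_flags my_states[] = {
 *		{ 1, "RUNNING"  },
 *		{ 2, "SLEEPING" },
 *		{ 0, NULL       },
 *	};
 *
 * ftrace_print_symbols_seq(p, 2, my_states) yields "SLEEPING", while an
 * unlisted value such as 7 falls back to "0x7". Note the fallback test
 * is !p->len, which relies on @p starting out empty.
 */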

#if BITS_PER_LONG == 32
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			     const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (!p->len)
		trace_seq_printf(p, "0x%llx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif

const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_hex_seq);
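
/*
 * Sketch: for a 3-byte buffer { 0xde, 0xad, 0x0b },
 * ftrace_print_hex_seq(p, buf, 3) appends "de ad 0b" — two lowercase
 * hex digits per byte, space separated, in buffer order (no endian
 * swapping, unlike trace_seq_putmem_hex() above).
 */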

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_puts(s, "\n");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_puts(s, "\n");
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic latency fields: irqs off, hard/soft irq
 * context, need-resched, and preempt count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';
	need_resched =
		(entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.';
	hardsoft_irq =
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.';

	if (!trace_seq_printf(s, "%c%c%c",
			      irqs_off, need_resched, hardsoft_irq))
		return 0;

	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	return ret;
}
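
/*
 * Illustrative decoding of the four-character field above: an entry
 * flagged IRQS_OFF | NEED_RESCHED | HARDIRQ with preempt_count == 1
 * prints "dNh1"; a plain entry with none of those set prints "...."
 * (the final '.' standing in for a zero preempt count).
 */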

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

static unsigned long preempt_mark_thresh = 100;

static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				rel_usecs > 1 ? '+' : ' ');
}
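
/*
 * Sketch of the marker logic above: with abs_usecs == 1234, a delta of
 * 250us to the next entry exceeds preempt_mark_thresh (100) and prints
 * " 1234us!: ", a 50us delta prints " 1234us+: ", and a delta of 1us or
 * less prints " 1234us : ".
 */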

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];
	int ret;

	trace_find_cmdline(entry->pid, comm);

	ret = trace_seq_printf(s, "%16s-%-5d [%03d] ",
			       comm, entry->pid, iter->cpu);
	if (!ret)
		return 0;

	if (trace_flags & TRACE_ITER_IRQ_INFO) {
		ret = trace_print_lat_fmt(s, entry);
		if (!ret)
			return 0;
	}

	return trace_seq_printf(s, " %5lu.%06lu: ",
				secs, usec_rem);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
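
/*
 * Sketch of the mapping above, assuming the conventional
 * TASK_STATE_TO_CHAR_STR of "RSDTtZXxKW": state 0 (TASK_RUNNING)
 * indexes slot 0 and yields 'R'; state 0x1 (TASK_INTERRUPTIBLE) has
 * lowest set bit 0, so __ffs() + 1 picks 'S'; any state whose lowest
 * set bit falls past the end of the string returns '?'.
 */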

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns the event of type @type if it is registered, otherwise NULL.
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used? */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
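
/*
 * Illustrative registration sketch (hypothetical caller, not part of
 * this file): a tracer supplies its output callbacks, leaves .type as 0
 * to receive a dynamically assigned number, and checks for the zero
 * error return.
 *
 *	static struct trace_event_functions my_funcs = {
 *		.trace	= my_trace_output,  // others default to trace_nop_print
 *	};
 *	static struct trace_event my_event = {
 *		.type	= 0,                // 0: assign a dynamic type
 *		.funcs	= &my_funcs,
 *	};
 *
 *	if (!register_ftrace_event(&my_event))
 *		pr_warn("my_event failed to register\n");
 */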

/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	__unregister_ftrace_event(event);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
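
/*
 * Illustrative output of the shared printer above: a switch from pid
 * 1234 (prio 120, state 'R') to pid 42 (prio 120, state 'S', comm
 * "bash") on CPU 3 renders roughly as
 *
 *	 1234:120:R ==> [003]    42:120:S bash
 *
 * with "==>" as the delimiter for TRACE_CTX context switches and "  +"
 * for TRACE_WAKE wakeup events.
 */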

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};

/* TRACE_STACK */
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	unsigned long *p;
	unsigned long *end;

	trace_assign_type(field, iter->ent);
	end = (unsigned long *)((long)iter->ent + iter->ent_size);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;

	for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
		if (!trace_seq_puts(s, " => "))
			goto partial;
		if (!seq_print_ip_sym(s, *p, flags))
			goto partial;
		if (!trace_seq_puts(s, "\n"))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<user stack trace>\n"))
		goto partial;

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
device_initcall(init_events);