/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

static DEFINE_MUTEX(trace_event_mutex);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

void trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;

	s->buffer[len] = 0;
	seq_puts(m, s->buffer);

	trace_seq_init(s);
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
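
/*
 * Editor's illustrative sketch (not part of the original file): a print
 * handler for a hypothetical event could build its output line with
 * trace_seq_printf() and report a partial line when the page buffer
 * fills up. The handler name and the "count" value are made up.
 */
#if 0
static enum print_line_t example_print_handler(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	/* A zero return means the line did not fit in the buffer. */
	if (!trace_seq_printf(s, "example: cpu=%d count=%d\n", iter->cpu, 42))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#endif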

int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (!len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
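
/*
 * Editor's note (illustrative, not in the original file): on a
 * little-endian machine the loop above walks the bytes from the end,
 * so the value comes out most-significant byte first. For example,
 * something like
 *
 *	u32 val = 0x1234abcd;
 *	trace_seq_putmem_hex(s, &val, sizeof(val));
 *
 * would append "1234abcd " to the sequence buffer on either endianness.
 */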

void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
	void *ret;

	if (len > ((PAGE_SIZE - 1) - s->len))
		return NULL;

	ret = s->buffer + s->len;
	s->len += len;

	return ret;
}

int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->ent.tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (i && ret)
			ret = trace_seq_puts(s, " <- ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
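
/*
 * Editor's note (illustrative, not in the original file): with
 * CONFIG_KALLSYMS enabled, the flags above roughly select between
 * output forms such as
 *
 *	schedule				(default, short symbol name)
 *	schedule+0x10/0x5a0			(TRACE_ITER_SYM_OFFSET)
 *	schedule <ffffffff8153e9d0>		(TRACE_ITER_SYM_ADDR appended)
 *
 * The symbol name, offsets and address shown here are made-up examples.
 */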

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
			      comm, entry->pid, cpu,
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	if (entry->preempt_count)
		return trace_seq_printf(s, "%x", entry->preempt_count);
	return trace_seq_puts(s, ".");
}

static unsigned long preempt_mark_thresh = 100;

static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
				comm, entry->pid, iter->cpu, secs, usec_rem);
}
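
/*
 * Editor's note (illustrative, not in the original file): the format
 * string above produces the familiar default trace prefix, e.g.
 *
 *	            bash-2269  [001]   123.456789: ...
 *
 * i.e. task comm, PID, CPU, then the trace timestamp split into seconds
 * and microseconds. The comm, PID and timestamp are made-up values.
 */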

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type, otherwise NULL
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	mutex_lock(&trace_event_mutex);

	if (!event) {
		ret = next_event_type++;
		goto out;
	}

	if (!event->type)
		event->type = next_event_type++;
	else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
	}

	if (ftrace_find_event(event->type))
		goto out;

	if (event->trace == NULL)
		event->trace = trace_nop_print;
	if (event->raw == NULL)
		event->raw = trace_nop_print;
	if (event->hex == NULL)
		event->hex = trace_nop_print;
	if (event->binary == NULL)
		event->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head_rcu(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	mutex_unlock(&trace_event_mutex);

	return ret;
}
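
/*
 * Editor's illustrative sketch (not part of the original file): a tracer
 * could register its own output handlers roughly like this. The handler,
 * the event structure and the use of a dynamic type (.type left as 0)
 * are all hypothetical; unset callbacks default to trace_nop_print.
 */
#if 0
static enum print_line_t example_event_trace(struct trace_iterator *iter,
					     int flags)
{
	return trace_seq_puts(&iter->seq, "example event\n") ?
		TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event example_event = {
	.type	= 0,			/* 0: let the core assign a type */
	.trace	= example_event_trace,
};

static int __init example_event_init(void)
{
	/* register_ftrace_event() returns the type number, or 0 on error */
	if (!register_ftrace_event(&example_event))
		return -ENODEV;
	return 0;
}
#endif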

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	mutex_lock(&trace_event_mutex);
	hlist_del(&event->node);
	mutex_unlock(&trace_event_mutex);

	return 0;
}

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
	return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags)
{
	return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

/* TRACE_SPECIAL */
static enum print_line_t trace_special_print(struct trace_iterator *iter,
					     int flags)
{
	struct special_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
			      field->arg1,
			      field->arg2,
			      field->arg3))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_hex(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_bin(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.trace		= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_STACK */
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	int i;

	trace_assign_type(field, iter->ent);

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		if (i) {
			if (!trace_seq_puts(s, " <= "))
				goto partial;

			if (!seq_print_ip_sym(s, field->caller[i], flags))
				goto partial;
		}
		if (!trace_seq_puts(s, "\n"))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.trace		= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	if (!trace_seq_putc(s, '\n'))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.trace		= trace_user_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_special_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
device_initcall(init_events);