trace_output.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017
  1. /*
  2. * trace_output.c
  3. *
  4. * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  5. *
  6. */
  7. #include <linux/module.h>
  8. #include <linux/mutex.h>
  9. #include <linux/ftrace.h>
  10. #include "trace_output.h"
/* must be a power of 2 */
#define EVENT_HASHSIZE	128

/* serializes event registration/unregistration below */
static DEFINE_MUTEX(trace_event_mutex);
/* registered events, hashed by event->type & (EVENT_HASHSIZE - 1) */
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

/* next dynamic event type id handed out by register_ftrace_event() */
static int	next_event_type = __TRACE_LAST_TYPE + 1;
  16. enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
  17. {
  18. struct trace_seq *s = &iter->seq;
  19. struct trace_entry *entry = iter->ent;
  20. struct bprint_entry *field;
  21. int ret;
  22. trace_assign_type(field, entry);
  23. ret = trace_seq_bprintf(s, field->fmt, field->buf);
  24. if (!ret)
  25. return TRACE_TYPE_PARTIAL_LINE;
  26. return TRACE_TYPE_HANDLED;
  27. }
  28. enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
  29. {
  30. struct trace_seq *s = &iter->seq;
  31. struct trace_entry *entry = iter->ent;
  32. struct print_entry *field;
  33. int ret;
  34. trace_assign_type(field, entry);
  35. ret = trace_seq_printf(s, "%s", field->buf);
  36. if (!ret)
  37. return TRACE_TYPE_PARTIAL_LINE;
  38. return TRACE_TYPE_HANDLED;
  39. }
/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formating of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 *
 * Returns a non-zero value (the buffer space that was available)
 * on success, or 0 when the formatted string did not fit.  On
 * overflow nothing is committed to the buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	/* one byte is reserved for the terminating NUL */
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
  68. int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
  69. {
  70. int len = (PAGE_SIZE - 1) - s->len;
  71. int ret;
  72. if (!len)
  73. return 0;
  74. ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
  75. /* If we can't write it all, don't bother writing anything */
  76. if (ret >= len)
  77. return 0;
  78. s->len += ret;
  79. return len;
  80. }
  81. /**
  82. * trace_seq_puts - trace sequence printing of simple string
  83. * @s: trace sequence descriptor
  84. * @str: simple string to record
  85. *
  86. * The tracer may use either the sequence operations or its own
  87. * copy to user routines. This function records a simple string
  88. * into a special buffer (@s) for later retrieval by a sequencer
  89. * or other mechanism.
  90. */
  91. int trace_seq_puts(struct trace_seq *s, const char *str)
  92. {
  93. int len = strlen(str);
  94. if (len > ((PAGE_SIZE - 1) - s->len))
  95. return 0;
  96. memcpy(s->buffer + s->len, str, len);
  97. s->len += len;
  98. return len;
  99. }
  100. int trace_seq_putc(struct trace_seq *s, unsigned char c)
  101. {
  102. if (s->len >= (PAGE_SIZE - 1))
  103. return 0;
  104. s->buffer[s->len++] = c;
  105. return 1;
  106. }
  107. int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
  108. {
  109. if (len > ((PAGE_SIZE - 1) - s->len))
  110. return 0;
  111. memcpy(s->buffer + s->len, mem, len);
  112. s->len += len;
  113. return len;
  114. }
/*
 * Append the bytes of @mem as ASCII hex digits followed by one space.
 * The bytes are emitted most-significant first: in memory order on
 * big-endian, and by walking the buffer backwards on little-endian,
 * so multi-byte values read naturally.
 *
 * NOTE(review): no bound check against HEX_CHARS here — assumes
 * 2 * len + 1 <= HEX_CHARS; callers must pass only small fields.
 */
int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	/* little endian: emit the highest-addressed byte first */
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}
  131. void *trace_seq_reserve(struct trace_seq *s, size_t len)
  132. {
  133. void *ret;
  134. if (len > ((PAGE_SIZE - 1) - s->len))
  135. return NULL;
  136. ret = s->buffer + s->len;
  137. s->len += len;
  138. return ret;
  139. }
/*
 * Append the pathname of @path to the sequence.  Newlines embedded in
 * the path are escaped by mangle_path() so a trace line stays a single
 * line.  A '?' is emitted when d_path() fails.  Returns 1 on success,
 * 0 when the buffer was full or mangling failed.
 */
int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	/* d_path() composes the name within the remaining buffer space */
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		/* copy the mangled name down to the current write position */
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}
#ifdef CONFIG_KRETPROBES
/*
 * A function whose return is kretprobed shows its caller as the
 * trampoline symbol, which is meaningless in a trace — replace it
 * with an explicit marker.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	/* size includes the NUL, so this is an exact-match compare */
	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
/*
 * Print the symbol name for @address using @fmt (no offset).  Without
 * CONFIG_KALLSYMS nothing is printed and success (1) is returned.
 */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	/* hide the kretprobe trampoline if that's what we resolved to */
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
/*
 * Like seq_print_sym_short() but prints the symbol in
 * "name+offset/size" form via sprint_symbol().
 */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);

	/* hide the kretprobe trampoline if that's what we resolved to */
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}
/* printed width of an instruction pointer: 8 hex digits on 32-bit, 16 on 64-bit */
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif
/*
 * Print a userspace address.  When @mm is supplied the address is
 * resolved against the task's vma list and printed as
 * "path/to/mapping[+0xoffset]".  The raw address is appended when
 * TRACE_ITER_SYM_ADDR is set or when no backing file was found.
 * Returns 0 when the sequence buffer overflowed.
 */
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		/* vma list must not change while we inspect it */
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
/*
 * Print the caller addresses of a userstack entry, separated by
 * " <- ".  When TRACE_ITER_SYM_USEROBJ is set, the recorded task's mm
 * is looked up so each address can be resolved to mapping+offset by
 * seq_print_user_ip().  Returns 0 when the sequence buffer overflowed.
 */
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->ent.tgid);
		if (task)
			mm = get_task_mm(task);	/* reference dropped via mmput() below */
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		/* ULONG_MAX terminates the recorded stack */
		if (ip == ULONG_MAX || !ret)
			break;
		if (i && ret)
			ret = trace_seq_puts(s, " <- ");
		if (!ip) {
			/* slot recorded but unresolvable */
			if (ret)
				ret = trace_seq_puts(s, "??");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
	}

	if (mm)
		mmput(mm);
	return ret;
}
  268. int
  269. seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
  270. {
  271. int ret;
  272. if (!ip)
  273. return trace_seq_printf(s, "0");
  274. if (sym_flags & TRACE_ITER_SYM_OFFSET)
  275. ret = seq_print_sym_offset(s, "%s", ip);
  276. else
  277. ret = seq_print_sym_short(s, "%s", ip);
  278. if (!ret)
  279. return 0;
  280. if (sym_flags & TRACE_ITER_SYM_ADDR)
  281. ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
  282. return ret;
  283. }
/*
 * Print the latency-format per-entry header: "comm-pid cpu" followed
 * by three flag characters (irqs-off 'd'/'X', need-resched 'N',
 * hardirq/softirq 'h'/'s'/'H') and the preempt depth (or '.').
 * Returns 0 on buffer overflow.
 */
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);
	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c",
			      comm, entry->pid, cpu,
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
				  'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	/* preempt depth in hex, '.' when not preempt-disabled */
	if (entry->preempt_count)
		return trace_seq_printf(s, "%x", entry->preempt_count);
	return trace_seq_puts(s, ".");
}
/* relative delays (in usecs) above this threshold are flagged with '!' */
static unsigned long preempt_mark_thresh = 100;

/*
 * Print the latency-format timestamp: absolute usecs plus a one-char
 * annotation — '!' for a long gap to the next event, '+' for >1us,
 * ' ' otherwise.
 */
static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}
/*
 * Print the standard per-entry context prefix:
 * "            comm-pid   [cpu]  secs.usecs: "
 * Returns 0 on buffer overflow.
 */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t = ns2usecs(iter->ts);
	/* do_div() leaves the quotient (seconds) in t */
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
				comm, entry->pid, iter->cpu, secs, usec_rem);
}
/*
 * Print the latency-format context prefix.  The relative time to the
 * *next* entry is looked up so long gaps can be annotated; in verbose
 * mode all fields are dumped numerically, otherwise the compact
 * comm/flags header plus timestamp is used.  Returns 0 on overflow.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	/* last entry: no successor, so the relative delay is zero */
	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

/*
 * Map a task state bitmask to its one-character code; state 0 maps to
 * the first character (running), out-of-range bits print as '?'.
 */
static int task_state_char(unsigned long state)
{
	/* lowest set bit selects the character; +1 skips the "running" slot */
	int bit = state ? __ffs(state) + 1 : 0;

	/* - 1 so the terminating NUL is never indexed */
	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 *
 * NOTE(review): traversal uses the _rcu list primitive — presumably
 * callers hold rcu_read_lock() or trace_event_mutex; verify at call
 * sites.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	/* EVENT_HASHSIZE is a power of 2, so this is type % EVENT_HASHSIZE */
	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	mutex_lock(&trace_event_mutex);

	/* NULL event just reserves and returns a fresh type id */
	if (!event) {
		ret = next_event_type++;
		goto out;
	}

	if (!event->type)
		event->type = next_event_type++;
	else if (event->type > __TRACE_LAST_TYPE) {
		/* static ids must be declared in trace.h to avoid clashing
		 * with dynamically assigned ones */
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
	}

	/* already registered: return 0 (error) without touching the hash */
	if (ftrace_find_event(event->type))
		goto out;

	/* fill unset output methods with the no-op printer */
	if (event->trace == NULL)
		event->trace = trace_nop_print;
	if (event->raw == NULL)
		event->raw = trace_nop_print;
	if (event->hex == NULL)
		event->hex = trace_nop_print;
	if (event->binary == NULL)
		event->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head_rcu(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	mutex_unlock(&trace_event_mutex);

	return ret;
}
/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 *
 * Always returns 0.
 */
int unregister_ftrace_event(struct trace_event *event)
{
	mutex_lock(&trace_event_mutex);
	hlist_del(&event->node);
	mutex_unlock(&trace_event_mutex);

	return 0;
}
/*
 * Standard events
 */

/* Default output method: prints nothing, reports the entry handled. */
enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
	return TRACE_TYPE_HANDLED;
}
  449. /* TRACE_FN */
  450. static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
  451. {
  452. struct ftrace_entry *field;
  453. struct trace_seq *s = &iter->seq;
  454. trace_assign_type(field, iter->ent);
  455. if (!seq_print_ip_sym(s, field->ip, flags))
  456. goto partial;
  457. if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
  458. if (!trace_seq_printf(s, " <-"))
  459. goto partial;
  460. if (!seq_print_ip_sym(s,
  461. field->parent_ip,
  462. flags))
  463. goto partial;
  464. }
  465. if (!trace_seq_printf(s, "\n"))
  466. goto partial;
  467. return TRACE_TYPE_HANDLED;
  468. partial:
  469. return TRACE_TYPE_PARTIAL_LINE;
  470. }
  471. static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
  472. {
  473. struct ftrace_entry *field;
  474. trace_assign_type(field, iter->ent);
  475. if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
  476. field->ip,
  477. field->parent_ip))
  478. return TRACE_TYPE_PARTIAL_LINE;
  479. return TRACE_TYPE_HANDLED;
  480. }
/*
 * Hex output for a function entry.  The SEQ_PUT_HEX_FIELD_RET macros
 * return TRACE_TYPE_PARTIAL_LINE from this function on overflow.
 */
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
/*
 * Binary output for a function entry.  The SEQ_PUT_FIELD_RET macros
 * return TRACE_TYPE_PARTIAL_LINE from this function on overflow.
 */
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
/* output methods for TRACE_FN entries */
static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};
/* TRACE_CTX and TRACE_WAKE */

/*
 * Shared text output for context-switch and wakeup entries.  @delim
 * separates the two tasks: "==>" for a switch, "+" for a wakeup.
 */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;


	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* Context switch: "prev ==> next" */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_print(iter, "==>");
}

/* Wakeup: "waker + wakee" */
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags)
{
	return trace_ctxwake_print(iter, " +");
}
  538. static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
  539. {
  540. struct ctx_switch_entry *field;
  541. int T;
  542. trace_assign_type(field, iter->ent);
  543. if (!S)
  544. task_state_char(field->prev_state);
  545. T = task_state_char(field->next_state);
  546. if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
  547. field->prev_pid,
  548. field->prev_prio,
  549. S,
  550. field->next_cpu,
  551. field->next_pid,
  552. field->next_prio,
  553. T))
  554. return TRACE_TYPE_PARTIAL_LINE;
  555. return TRACE_TYPE_HANDLED;
  556. }
/* Context switch, raw form: prev-state char derived from the entry. */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, 0);
}

/* Wakeup, raw form: prev-state rendered as '+'. */
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_raw(iter, '+');
}
  565. static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
  566. {
  567. struct ctx_switch_entry *field;
  568. struct trace_seq *s = &iter->seq;
  569. int T;
  570. trace_assign_type(field, iter->ent);
  571. if (!S)
  572. task_state_char(field->prev_state);
  573. T = task_state_char(field->next_state);
  574. SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
  575. SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
  576. SEQ_PUT_HEX_FIELD_RET(s, S);
  577. SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
  578. SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
  579. SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
  580. SEQ_PUT_HEX_FIELD_RET(s, T);
  581. return TRACE_TYPE_HANDLED;
  582. }
/* Context switch, hex form: prev-state char derived from the entry. */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, 0);
}

/* Wakeup, hex form: prev-state rendered as '+'. */
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
	return trace_ctxwake_hex(iter, '+');
}
/*
 * Shared binary output for ctx-switch/wakeup entries.  Unlike the
 * text/raw/hex forms, the state fields are emitted as the raw
 * prev_state/next_state values, not as characters.
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}
/* output methods for TRACE_CTX entries; binary form is shared with wakeups */
static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

/* output methods for TRACE_WAKE entries */
static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};
  619. /* TRACE_SPECIAL */
  620. static enum print_line_t trace_special_print(struct trace_iterator *iter,
  621. int flags)
  622. {
  623. struct special_entry *field;
  624. trace_assign_type(field, iter->ent);
  625. if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
  626. field->arg1,
  627. field->arg2,
  628. field->arg3))
  629. return TRACE_TYPE_PARTIAL_LINE;
  630. return TRACE_TYPE_HANDLED;
  631. }
/* Hex output for a special entry: the three args as hex fields. */
static enum print_line_t trace_special_hex(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}
/* Binary output for a special entry: the three args as raw fields. */
static enum print_line_t trace_special_bin(struct trace_iterator *iter,
					   int flags)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}
/* output methods for TRACE_SPECIAL entries (raw form reuses the text one) */
static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.trace		= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
  661. /* TRACE_STACK */
  662. static enum print_line_t trace_stack_print(struct trace_iterator *iter,
  663. int flags)
  664. {
  665. struct stack_entry *field;
  666. struct trace_seq *s = &iter->seq;
  667. int i;
  668. trace_assign_type(field, iter->ent);
  669. for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
  670. if (i) {
  671. if (!trace_seq_puts(s, " <= "))
  672. goto partial;
  673. if (!seq_print_ip_sym(s, field->caller[i], flags))
  674. goto partial;
  675. }
  676. if (!trace_seq_puts(s, "\n"))
  677. goto partial;
  678. }
  679. return TRACE_TYPE_HANDLED;
  680. partial:
  681. return TRACE_TYPE_PARTIAL_LINE;
  682. }
/* output methods for TRACE_STACK; raw/hex/binary reuse the special handlers */
static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.trace		= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
  690. /* TRACE_USER_STACK */
  691. static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
  692. int flags)
  693. {
  694. struct userstack_entry *field;
  695. struct trace_seq *s = &iter->seq;
  696. trace_assign_type(field, iter->ent);
  697. if (!seq_print_userip_objs(field, s, flags))
  698. goto partial;
  699. if (!trace_seq_putc(s, '\n'))
  700. goto partial;
  701. return TRACE_TYPE_HANDLED;
  702. partial:
  703. return TRACE_TYPE_PARTIAL_LINE;
  704. }
/* output methods for TRACE_USER_STACK; raw/hex/binary reuse the special handlers */
static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.trace		= trace_user_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};
  712. /* TRACE_BPRINT */
  713. static enum print_line_t
  714. trace_bprint_print(struct trace_iterator *iter, int flags)
  715. {
  716. struct trace_entry *entry = iter->ent;
  717. struct trace_seq *s = &iter->seq;
  718. struct bprint_entry *field;
  719. trace_assign_type(field, entry);
  720. if (!seq_print_ip_sym(s, field->ip, flags))
  721. goto partial;
  722. if (!trace_seq_puts(s, ": "))
  723. goto partial;
  724. if (!trace_seq_bprintf(s, field->fmt, field->buf))
  725. goto partial;
  726. return TRACE_TYPE_HANDLED;
  727. partial:
  728. return TRACE_TYPE_PARTIAL_LINE;
  729. }
  730. static enum print_line_t
  731. trace_bprint_raw(struct trace_iterator *iter, int flags)
  732. {
  733. struct bprint_entry *field;
  734. struct trace_seq *s = &iter->seq;
  735. trace_assign_type(field, iter->ent);
  736. if (!trace_seq_printf(s, ": %lx : ", field->ip))
  737. goto partial;
  738. if (!trace_seq_bprintf(s, field->fmt, field->buf))
  739. goto partial;
  740. return TRACE_TYPE_HANDLED;
  741. partial:
  742. return TRACE_TYPE_PARTIAL_LINE;
  743. }
/*
 * output methods for TRACE_BPRINT; hex/binary are left unset and are
 * filled in with trace_nop_print by register_ftrace_event()
 */
static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};
  749. /* TRACE_PRINT */
  750. static enum print_line_t trace_print_print(struct trace_iterator *iter,
  751. int flags)
  752. {
  753. struct print_entry *field;
  754. struct trace_seq *s = &iter->seq;
  755. trace_assign_type(field, iter->ent);
  756. if (!seq_print_ip_sym(s, field->ip, flags))
  757. goto partial;
  758. if (!trace_seq_printf(s, ": %s", field->buf))
  759. goto partial;
  760. return TRACE_TYPE_HANDLED;
  761. partial:
  762. return TRACE_TYPE_PARTIAL_LINE;
  763. }
  764. static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
  765. {
  766. struct print_entry *field;
  767. trace_assign_type(field, iter->ent);
  768. if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
  769. goto partial;
  770. return TRACE_TYPE_HANDLED;
  771. partial:
  772. return TRACE_TYPE_PARTIAL_LINE;
  773. }
/*
 * output methods for TRACE_PRINT; hex/binary are left unset and are
 * filled in with trace_nop_print by register_ftrace_event()
 */
static struct trace_event trace_print_event = {
	.type	 	= TRACE_PRINT,
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};
/* NULL-terminated list of the builtin events registered at boot */
static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_special_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};
/*
 * Register every builtin trace event at boot.
 * register_ftrace_event() returns the event type on success and 0 on
 * failure, so a zero return here means the registration was rejected.
 */
__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
device_initcall(init_events);