trace_output.c

/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_mutex);

DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
	int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
	int ret;

	ret = seq_write(m, s->buffer, len);

	/*
	 * Only reset this buffer if we successfully wrote to the
	 * seq_file buffer.
	 */
	if (!ret)
		trace_seq_init(s);

	return ret;
}

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct bprint_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_bprintf(s, field->fmt, field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct print_entry *field;
	int ret;

	trace_assign_type(field, entry);

	ret = trace_seq_printf(s, "%s", field->buf);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * It returns 0 if the trace oversizes the buffer's free
 * space, 1 otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (s->full || !len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
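
/*
 * Example usage (a minimal sketch, not taken from this file): a tracer's
 * print_line callback can build its output with trace_seq_printf() and
 * report a partial line when the sequence buffer is full. The callback
 * name my_tracer_print_line is made up for illustration.
 *
 *	static enum print_line_t my_tracer_print_line(struct trace_iterator *iter)
 *	{
 *		struct trace_seq *s = &iter->seq;
 *
 *		if (!trace_seq_printf(s, "[%03d] %llu ns\n", iter->cpu,
 *				      (unsigned long long)iter->ts))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *		return TRACE_TYPE_HANDLED;
 *	}
 */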

/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = vsnprintf(s->buffer + s->len, len, fmt, args);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);
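
/*
 * Example usage (a minimal sketch, not taken from this file):
 * trace_seq_vprintf() lets a caller forward its own va_list, e.g. from a
 * varargs wrapper of its own. The helper name my_seq_log is made up for
 * illustration.
 *
 *	static int my_seq_log(struct trace_seq *s, const char *fmt, ...)
 *	{
 *		va_list ap;
 *		int ret;
 *
 *		va_start(ap, fmt);
 *		ret = trace_seq_vprintf(s, fmt, ap);
 *		va_end(ap);
 *
 *		return ret;
 *	}
 */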

int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
	int len = (PAGE_SIZE - 1) - s->len;
	int ret;

	if (s->full || !len)
		return 0;

	ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len) {
		s->full = 1;
		return 0;
	}

	s->len += ret;

	return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	s->buffer[s->len++] = c;

	return 1;
}
EXPORT_SYMBOL(trace_seq_putc);

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
	if (s->full)
		return 0;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return 0;
	}

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	const unsigned char *data = mem;
	int i, j;

	if (s->full)
		return 0;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}

void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
	void *ret;

	if (s->full)
		return NULL;

	if (len > ((PAGE_SIZE - 1) - s->len)) {
		s->full = 1;
		return NULL;
	}

	ret = s->buffer + s->len;
	s->len += len;

	return ret;
}

int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->full)
		return 0;

	if (s->len >= (PAGE_SIZE - 1)) {
		s->full = 1;
		return 0;
	}

	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	s->full = 1;
	return 0;
}

const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = p->buffer + p->len;
	int i;

	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (p->len && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
			 const struct trace_print_flags *symbol_array)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	if (!p->len)
		trace_seq_printf(p, "0x%lx", val);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);

const char *
ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
{
	int i;
	const char *ret = p->buffer + p->len;

	for (i = 0; i < buf_len; i++)
		trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);

	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_hex_seq);

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_puts(s, "\n");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_puts(s, "\n");
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count and lock depth.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	int hardirq, softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	if (!trace_seq_printf(s, "%c%c%c",
			      (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
				(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
					'X' : '.',
			      (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
				'N' : '.',
			      (hardirq && softirq) ? 'H' :
				hardirq ? 'h' : softirq ? 's' : '.'))
		return 0;

	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	if (!ret)
		return 0;

	if (entry->lock_depth < 0)
		return trace_seq_putc(s, '.');

	return trace_seq_printf(s, "%d", entry->lock_depth);
}
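
/*
 * Example output (illustrative only): with interrupts disabled, a pending
 * reschedule, softirq context, a preempt_count of 1 and no lock depth
 * recorded, trace_print_lat_fmt() emits "dNs1." as the latency flag field.
 */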

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

static unsigned long preempt_mark_thresh = 100;

static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
				rel_usecs > preempt_mark_thresh ? '!' :
				  rel_usecs > 1 ? '+' : ' ');
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
				comm, entry->pid, iter->cpu, secs, usec_rem);
}

int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
				       " %ld.%03ldms (+%ld.%03ldms): ", comm,
				       entry->pid, iter->cpu, entry->flags,
				       entry->preempt_count, iter->idx,
				       ns2usecs(iter->ts),
				       abs_usecs / USEC_PER_MSEC,
				       abs_usecs % USEC_PER_MSEC,
				       rel_usecs / USEC_PER_MSEC,
				       rel_usecs % USEC_PER_MSEC);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
		if (ret)
			ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
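
/*
 * Example usage (a minimal sketch, not taken from this file): a caller that
 * wants to print an entry looks its handler up under the event read lock.
 * The variable names iter and ev are made up for illustration.
 *
 *	struct trace_event *ev;
 *
 *	trace_event_read_lock();
 *	ev = ftrace_find_event(iter->ent->type);
 *	if (ev && ev->funcs->trace)
 *		ev->funcs->trace(iter, trace_flags, ev);
 *	trace_event_read_unlock();
 */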

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * let's see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we use up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}

void trace_event_read_lock(void)
{
	down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
	up_read(&trace_event_mutex);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_mutex);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used? */
		if (ftrace_find_event(event->type))
			goto out;
	}

	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
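
/*
 * Example usage (a minimal sketch, not taken from this file): a module can
 * register output callbacks for its own entries by filling in a
 * trace_event_functions table and a trace_event, then handing the event to
 * register_ftrace_event(). The names my_event_funcs, my_event and
 * my_trace_output are made up for illustration.
 *
 *	static struct trace_event_functions my_event_funcs = {
 *		.trace	= my_trace_output,
 *	};
 *
 *	static struct trace_event my_event = {
 *		.type	= 0,	// 0 means "assign me a dynamic type"
 *		.funcs	= &my_event_funcs,
 *	};
 *
 *	if (!register_ftrace_event(&my_event))
 *		printk(KERN_WARNING "could not register trace event\n");
 */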

/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	down_write(&trace_event_mutex);
	__unregister_ftrace_event(event);
	up_write(&trace_event_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
				  struct trace_event *event)
{
	return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, " +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;
	int T;

	trace_assign_type(field, iter->ent);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_ctx_funcs = {
	.trace		= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.funcs		= &trace_ctx_funcs,
};

static struct trace_event_functions trace_wake_funcs = {
	.trace		= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.funcs		= &trace_wake_funcs,
};

/* TRACE_SPECIAL */
static enum print_line_t trace_special_print(struct trace_iterator *iter,
					     int flags, struct trace_event *event)
{
	struct special_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
			      field->arg1,
			      field->arg2,
			      field->arg3))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_hex(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct special_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions trace_special_funcs = {
	.trace		= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.funcs		= &trace_special_funcs,
};

/* TRACE_STACK */
static enum print_line_t trace_stack_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct stack_entry *field;
	struct trace_seq *s = &iter->seq;
	int i;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<stack trace>\n"))
		goto partial;
	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
			break;
		if (!trace_seq_puts(s, " => "))
			goto partial;

		if (!seq_print_ip_sym(s, field->caller[i], flags))
			goto partial;
		if (!trace_seq_puts(s, "\n"))
			goto partial;
	}

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_stack_funcs = {
	.trace		= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.funcs		= &trace_stack_funcs,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
						int flags, struct trace_event *event)
{
	struct userstack_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "<user stack trace>\n"))
		goto partial;

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_user_stack_funcs = {
	.trace		= trace_user_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.funcs		= &trace_user_stack_funcs,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	struct bprint_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_puts(s, ": "))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags,
		 struct trace_event *event)
{
	struct bprint_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(s, ": %lx : ", field->ip))
		goto partial;

	if (!trace_seq_bprintf(s, field->fmt, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_bprint_funcs = {
	.trace		= trace_bprint_print,
	.raw		= trace_bprint_raw,
};

static struct trace_event trace_bprint_event = {
	.type		= TRACE_BPRINT,
	.funcs		= &trace_bprint_funcs,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct print_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	struct print_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event_functions trace_print_funcs = {
	.trace		= trace_print_print,
	.raw		= trace_print_raw,
};

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.funcs		= &trace_print_funcs,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_special_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
device_initcall(init_events);