trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_IRQS          0x40

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
        .opts = trace_opts
};

static struct trace_array *graph_array;

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = index;

        return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
        barrier();
        current->curr_ret_stack--;

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

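/*
 * Write a function-entry event into the ring buffer. Returns 1 if an
 * event was reserved, 0 if tracing is disabled on this CPU or the
 * buffer reservation failed.
 */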
int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ent_entry *entry;

        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return 0;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);

        return 1;
}

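/*
 * Entry callback registered with register_ftrace_graph(). The per-cpu
 * data->disabled counter guards against reentrancy: the event is only
 * recorded when this is the first level of nesting on this CPU.
 */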
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        /* Trace it when it is nested in, or is itself, an enabled function */
        if (!(trace->depth || ftrace_graph_addr(trace->func)))
                return 0;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

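/*
 * When a duration threshold is set, no entry event is written at all;
 * returning 1 keeps the return hook armed so trace_graph_thresh_return()
 * can record only the calls that ran longer than the threshold.
 */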
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
        if (tracing_thresh)
                return 1;
        else
                return trace_graph_entry(trace);
}

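/*
 * Write a function-return event into the ring buffer, mirroring
 * __trace_graph_entry() above.
 */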
void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->buffer;
        struct ftrace_graph_ret_entry *entry;

        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                ring_buffer_unlock_commit(buffer, event);
}

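/*
 * Return callback registered with register_ftrace_graph(), using the
 * same per-cpu recursion guard as trace_graph_entry().
 */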
void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */
        smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

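/*
 * Tracer init: pick the plain or threshold-filtering callbacks
 * depending on whether tracing_thresh is set, then start recording
 * the pid -> comm mappings used by print_graph_proc().
 */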
static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_thresh_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
        int ret;

        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

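/*
 * Print "comm-pid", centered in a column of
 * TRACE_GRAPH_PROCINFO_LENGTH characters. The comm is truncated to
 * seven characters to keep the column narrow.
 */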
static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int ret;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        if (!trace_seq_putc(s, ' '))
                return 0;

        return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;
        int ret;

        if (!data)
                return TRACE_TYPE_HANDLED;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        ret = trace_seq_printf(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_printf(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

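/*
 * Peek at the next event in the ring buffer: if it is the matching
 * TRACE_GRAPH_RET for @curr (same pid and function), the call is a
 * leaf and both events can be folded into a single "func();" line.
 * Returns the return entry in that case, NULL otherwise.
 */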
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {
                ring_iter = iter->buffer_iter[iter->cpu];

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->tr->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

/* Signal an execution-time overhead to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s,
                     u32 flags)
{
        /* If the duration is not displayed, we don't need anything */
        if (!(flags & TRACE_GRAPH_PRINT_DURATION))
                return 1;

        /* Non-nested entry or return */
        if (duration == -1)
                return trace_seq_printf(s, "  ");

        if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
                /* Duration exceeded 100 usecs */
                if (duration > 100000ULL)
                        return trace_seq_printf(s, "! ");

                /* Duration exceeded 10 usecs */
                if (duration > 10000ULL)
                        return trace_seq_printf(s, "+ ");
        }

        return trace_seq_printf(s, "  ");
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        return trace_seq_printf(s, "%5lu.%06lu | ",
                                (unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        int ret;
        struct trace_seq *s = &iter->seq;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type == TRACE_GRAPH_ENT)
                ret = trace_seq_printf(s, "==========>");
        else
                ret = trace_seq_printf(s, "<==========");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Don't close the duration column if we don't have one */
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                trace_seq_printf(s, " |");
        ret = trace_seq_printf(s, "\n");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char msecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(msecs_str, "%lu", (unsigned long) duration);

        /* Print msecs */
        ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(msecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len),
                         "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_printf(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        int ret;

        ret = trace_print_graph_duration(duration, s);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        ret = trace_seq_printf(s, "| ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead */
        ret = print_graph_overhead(duration, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        int ret;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No overhead */
        ret = print_graph_overhead(-1, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

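/*
 * Print the columns common to every line: context-switch block,
 * irq entry/exit marker, absolute time, CPU, task/pid and latency
 * flags, as selected by @flags.
 */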
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        int cpu = iter->cpu;
        int ret;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type) {
                /* Interrupt */
                ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_printf(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Latency format */
        if (trace_flags & TRACE_ITER_LATENCY_FMT) {
                ret = print_graph_lat_fmt(s, ent);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        struct fgraph_data *data = iter->private;
        int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        if (flags & TRACE_GRAPH_PRINT_IRQS)
                return 0;

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        struct fgraph_data *data = iter->private;
        int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        if (flags & TRACE_GRAPH_PRINT_IRQS)
                return 0;

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
                return TRACE_TYPE_PARTIAL_LINE;

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int ret;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        if (print_graph_prologue(iter, s, 0, 0, flags))
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overhead */
        ret = print_graph_overhead(duration, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Duration */
        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                ret = print_graph_duration(duration, s);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_printf(s, " ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name.
         */
        if (func_match) {
                ret = trace_seq_printf(s, "}\n");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        } else {
                ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                       trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                              cpu, pid, flags);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        if (print_graph_prologue(iter, s, 0, 0, flags))
                return TRACE_TYPE_PARTIAL_LINE;

        /* No overhead */
        ret = print_graph_overhead(-1, s, flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        if (flags & TRACE_GRAPH_PRINT_DURATION) {
                ret = trace_seq_printf(s, "            |  ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_printf(s, " ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_printf(s, "/* ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        /* Strip ending newline */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
                s->len--;
        }

        ret = trace_seq_printf(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

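/*
 * Main print_line entry point: dispatch on the event type, replaying
 * a saved entry first if the previous output overflowed the seq
 * buffer. Anything other than graph, stack and function events is
 * rendered as a comment between the braces.
 */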
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and function entries as comments */
                return TRACE_TYPE_UNHANDLED;
        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    " /* 4 spaces */
                "                 "; /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
        seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_printf(s, "#");
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "  TASK/PID       ");
        if (lat)
                seq_printf(s, "|||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "  DURATION   ");
        seq_printf(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_printf(s, "#");
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "   |    |        ");
        if (lat)
                seq_printf(s, "|||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "   |   |      ");
        seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

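/*
 * Allocate and initialize the per-cpu state (last pid, current depth,
 * irq depth) used while walking the buffer. Called for both static
 * and pipe reads.
 */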
void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        int cpu;

        iter->private = NULL;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

out_err_free:
        kfree(data);
out_err:
        pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __read_mostly = {
        .name           = "function_graph",
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};

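/*
 * Register the entry/return trace events and the tracer itself.
 * max_bytes_for_cpu sizes the CPU column for the largest possible
 * CPU number.
 */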
static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        if (!register_ftrace_event(&graph_trace_entry_event)) {
                pr_warning("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_ftrace_event(&graph_trace_ret_event)) {
                pr_warning("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);