trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN       0x1
#define TRACE_GRAPH_PRINT_CPU           0x2
#define TRACE_GRAPH_PRINT_OVERHEAD      0x4
#define TRACE_GRAPH_PRINT_PROC          0x8
#define TRACE_GRAPH_PRINT_DURATION      0x10
#define TRACE_GRAPH_PRINT_ABS_TIME      0x20
#define TRACE_GRAPH_PRINT_IRQS          0x40

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debug purpose) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU ? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display Overhead ? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns and proc by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
        .opts = trace_opts
};
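
/*
 * Illustrative usage, assuming the usual debugfs mount point: each
 * option above can be toggled at run time through the trace_options
 * file while this tracer is active, e.g.
 *
 *   # echo funcgraph-proc   > /sys/kernel/debug/tracing/trace_options
 *   # echo nofuncgraph-irqs > /sys/kernel/debug/tracing/trace_options
 */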

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the
 * following values are used by print_graph_irq() and others
 * to fill space in the DURATION column.
 */
enum {
        DURATION_FILL_FULL  = -1,
        DURATION_FILL_START = -2,
        DURATION_FILL_END   = -3,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
                     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = index;

        return 0;
}
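
/*
 * Sketch of the per-task shadow stack maintained above (illustrative):
 *
 *   current->curr_ret_stack -> { ret, func, calltime, subtime, fp }
 *                              { ... outer, not-yet-returned frames }
 *
 * ftrace_pop_return_trace() below pops one frame when the traced
 * function returns, and ftrace_return_to_handler() hands the saved
 * return address back to the architecture trampoline.
 */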

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt were to come in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

int __trace_graph_entry(struct trace_array *tr,
                        struct ftrace_graph_ent *trace,
                        unsigned long flags,
                        int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return 0;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry = ring_buffer_event_data(event);
        entry->graph_ent = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                __buffer_unlock_commit(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(current))
                return 0;

        /* Trace it when it is nested in an enabled function, or is itself enabled. */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
             ftrace_graph_ignore_irqs()) ||
            (max_depth && trace->depth >= max_depth))
                return 0;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
        if (tracing_thresh)
                return 1;
        else
                return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
                       unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                     unsigned long ip, unsigned long parent_ip,
                     unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                          struct ftrace_graph_ret *trace,
                          unsigned long flags,
                          int pc)
{
        struct ftrace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
                return;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        entry->ret = *trace;
        if (!filter_current_check_discard(buffer, call, entry, event))
                __buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */
        smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
        else
                trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_thresh_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}
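
/*
 * Illustrative usage, assuming the usual debugfs layout:
 * graph_trace_init() runs when this tracer is selected and
 * graph_trace_reset() when it is replaced, e.g.
 *
 *   # echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/trace
 *   # echo nop > /sys/kernel/debug/tracing/current_tracer
 */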

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
        int ret;

        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int ret;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++) {
                ret = trace_seq_putc(s, ' ');
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++) {
                ret = trace_seq_putc(s, ' ');
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}
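
/*
 * Example of the field this produces (illustrative): the comm is
 * truncated to 7 characters and the "comm-pid" pair is centered in a
 * 14-character column, e.g. "  bash-2794   " for comm "bash", pid 2794.
 */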

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        if (!trace_seq_putc(s, ' '))
                return 0;

        return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;
        int ret;

        if (!data)
                return TRACE_TYPE_HANDLED;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return TRACE_TYPE_HANDLED;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
        ret = trace_seq_puts(s,
                " ------------------------------------------\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_cpu(s, cpu);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, prev_pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_puts(s, " => ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_proc(s, pid);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_seq_puts(s,
                "\n ------------------------------------------\n\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                    struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, then
                         * we only care about what type it is. Otherwise we can
                         * safely copy the entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
            curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        return trace_seq_printf(s, "%5lu.%06lu |  ",
                                (unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        int ret;
        struct trace_seq *s = &iter->seq;

        if (addr < (unsigned long)__irqentry_text_start ||
            addr >= (unsigned long)__irqentry_text_end)
                return TRACE_TYPE_UNHANDLED;

        if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
                        ret = print_graph_abs_time(iter->ts, s);
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU) {
                        ret = print_graph_cpu(s, cpu);
                        if (ret == TRACE_TYPE_PARTIAL_LINE)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        ret = print_graph_proc(s, pid);
                        if (ret == TRACE_TYPE_PARTIAL_LINE)
                                return TRACE_TYPE_PARTIAL_LINE;
                        ret = trace_seq_puts(s, " | ");
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }
        }

        /* No overhead */
        ret = print_graph_duration(DURATION_FILL_START, s, flags);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        if (type == TRACE_GRAPH_ENT)
                ret = trace_seq_puts(s, "==========>");
        else
                ret = trace_seq_puts(s, "<==========");

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = print_graph_duration(DURATION_FILL_END, s, flags);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        ret = trace_seq_putc(s, '\n');

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
        return TRACE_TYPE_HANDLED;
}
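
/*
 * In the trace output these markers bracket an interrupt handler
 * (illustrative lines, default flags assumed):
 *
 *  1)   ==========> |
 *  1)               |  smp_apic_timer_interrupt() {
 *  ...
 *  1)   <========== |
 */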

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char msecs_str[21];
        char nsecs_str[5];
        int ret, len;
        int i;

        sprintf(msecs_str, "%lu", (unsigned long) duration);

        /* Print usecs (the integral part after dividing the ns down) */
        ret = trace_seq_printf(s, "%s", msecs_str);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        len = strlen(msecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                ret = trace_seq_printf(s, ".%s", nsecs_str);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
                len += strlen(nsecs_str);
        }

        ret = trace_seq_puts(s, " us ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 7; i++) {
                ret = trace_seq_putc(s, ' ');
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }
        return TRACE_TYPE_HANDLED;
}
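
/*
 * Illustrative results: a 586 ns duration prints as "0.586 us", a
 * 12345 ns one as "12.345 us", and once the integral microseconds
 * part reaches seven digits the fractional nanoseconds are dropped
 * to keep the column width.
 */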

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
                     u32 flags)
{
        int ret = -1;

        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(trace_flags & TRACE_ITER_CONTEXT_INFO))
                return TRACE_TYPE_HANDLED;

        /* No real data, just filling the column with spaces */
        switch (duration) {
        case DURATION_FILL_FULL:
                ret = trace_seq_puts(s, "              |  ");
                return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
        case DURATION_FILL_START:
                ret = trace_seq_puts(s, "  ");
                return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
        case DURATION_FILL_END:
                ret = trace_seq_puts(s, " |");
                return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
        }

        /* Signal an overhead of execution time to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
                /* Duration exceeded 100 usecs */
                if (duration > 100000ULL)
                        ret = trace_seq_puts(s, "! ");
                /* Duration exceeded 10 usecs */
                else if (duration > 10000ULL)
                        ret = trace_seq_puts(s, "+ ");
        }

        /*
         * The -1 means we either did not exceed the duration thresholds
         * or we don't want to print out the overhead. Either way we need
         * to fill out the space.
         */
        if (ret == -1)
                ret = trace_seq_puts(s, "  ");

        /* Catch here any failure that happened above */
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        ret = trace_print_graph_duration(duration, s);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        ret = trace_seq_puts(s, "|  ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                       struct ftrace_graph_ent_entry *entry,
                       struct ftrace_graph_ret_entry *ret_entry,
                       struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int ret;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        ret = print_graph_duration(duration, s, flags);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_putc(s, ' ');
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}
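
/*
 * Illustrative output difference: a leaf (an entry immediately
 * followed by its matching return) collapses onto one line with its
 * duration, while a nested call opens a bracket that
 * print_graph_return() closes later:
 *
 *  1)   1.234 us   |    kfree();
 *  1)              |    vfs_read() {
 *  1)              |      ...
 *  1) + 12.345 us  |    }
 */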

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        int ret;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_putc(s, ' ');
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * we already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        int cpu = iter->cpu;
        int ret;

        /* Pid */
        if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        if (type) {
                /* Interrupt */
                ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
                return 0;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
                ret = print_graph_abs_time(iter->ts, s);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU) {
                ret = print_graph_cpu(s, cpu);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                ret = print_graph_proc(s, ent->pid);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;

                ret = trace_seq_puts(s, " | ");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Latency format */
        if (trace_flags & TRACE_ITER_LATENCY_FMT) {
                ret = print_graph_lat_fmt(s, ent);
                if (ret == TRACE_TYPE_PARTIAL_LINE)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}
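
/*
 * Illustration of the depth_irq bookkeeping above (hypothetical
 * trace, funcgraph-irqs disabled): an interrupt hitting at depth 2
 * sets depth_irq = 2 in check_irq_entry(); every entry and return
 * nested inside the handler is then suppressed, until a return at
 * depth <= 2 resets depth_irq to -1 and normal output resumes.
 */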

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                  struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
                return TRACE_TYPE_PARTIAL_LINE;

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int ret;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        if (print_graph_prologue(iter, s, 0, 0, flags))
                return TRACE_TYPE_PARTIAL_LINE;

        /* Overhead and duration */
        ret = print_graph_duration(duration, s, flags);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
                ret = trace_seq_putc(s, ' ');
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name.
         */
        if (func_match) {
                ret = trace_seq_puts(s, "}\n");
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        } else {
                ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
                ret = trace_seq_printf(s, " (Overruns: %lu)\n",
                                       trace->overrun);
                if (!ret)
                        return TRACE_TYPE_PARTIAL_LINE;
        }

        ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                              cpu, pid, flags);
        if (ret == TRACE_TYPE_PARTIAL_LINE)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        if (print_graph_prologue(iter, s, 0, 0, flags))
                return TRACE_TYPE_PARTIAL_LINE;

        /* No time */
        ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
        if (ret != TRACE_TYPE_HANDLED)
                return ret;

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
                        ret = trace_seq_putc(s, ' ');
                        if (!ret)
                                return TRACE_TYPE_PARTIAL_LINE;
                }

        /* The comment */
        ret = trace_seq_puts(s, "/* ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        /* Strip ending newline */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
                s->len--;
        }

        ret = trace_seq_puts(s, " */\n");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event,
                 * thus @field may become invalid, so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * it can be safely saved at the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                "      /* 16 spaces */
                "    "                                       /* 4 spaces */
                "                 ";                         /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_printf(s, "#");
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "  TASK/PID       ");
        if (lat)
                seq_printf(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "  DURATION   ");
        seq_printf(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_printf(s, "#");
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_printf(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_printf(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_printf(s, "   |    |        ");
        if (lat)
                seq_printf(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_printf(s, "   |   |      ");
        seq_printf(s, "               |   |   |   |\n");
}
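
/*
 * With the default flags the resulting header looks roughly like:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */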

void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;

        if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        int cpu;

        iter->private = NULL;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .wait_pipe      = poll_wait_pipe,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};
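
/*
 * Illustrative usage of the max_graph_depth file created below
 * (path assumes the usual debugfs mount):
 *
 *   # echo 3 > /sys/kernel/debug/tracing/max_graph_depth
 *   # echo 0 > /sys/kernel/debug/tracing/max_graph_depth
 *
 * 0 means no limit; see the max_depth check in trace_graph_entry().
 */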

static __init int init_graph_debugfs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (!d_tracer)
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        if (!register_ftrace_event(&graph_trace_entry_event)) {
                pr_warning("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_ftrace_event(&graph_trace_ret_event)) {
                pr_warning("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);