trace_irqsoff.c

/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
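
/*
 * Typical usage from user space, assuming debugfs is mounted at
 * /sys/kernel/debug (see Documentation/trace/ftrace.txt for details):
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo 0 > tracing_max_latency
 *	# echo irqsoff > current_tracer
 *	... let the system run ...
 *	# cat tracing_max_latency
 *	# cat trace
 */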

#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
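
/*
 * preempt_trace() and irq_trace() report whether the active tracer
 * variant is watching preempt-off or irqs-off sections; each compiles
 * away to 0 when the corresponding tracer is not configured in.
 */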

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(*flags))
		return 0;

	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
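/*
 * Toggle the display-graph option at run time. The flat and graph
 * formats cannot share a trace buffer, so the tracer is stopped, the
 * per-cpu state and max latency are reset, and tracing is restarted
 * in the requested mode.
 */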
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	int cpu;

	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tracing_max_latency = 0;
	tracing_reset_online_cpus(irqsoff_trace);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
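
/*
 * Called when a critical section ends: compute how long irqs (or
 * preemption) were off and, if that beats the current max latency (or
 * crosses tracing_thresh), snapshot it as the new max. max_trace_lock
 * serializes racing CPUs, and the sequence count discards measurements
 * that another CPU's max update disturbed in the meantime.
 */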
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
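
/*
 * Mark the beginning of a critical section on this CPU: record the
 * timestamp and start address, and flag the CPU as being traced so
 * the function tracer callbacks start recording into the buffer.
 */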
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
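
/*
 * Mark the end of a critical section: clear the per-cpu tracing flag
 * and hand the measured interval to check_critical_timing().
 */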
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to bracket stoppage (e.g. in idle) */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
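
/*
 * Register the function (or function graph) tracer callbacks and
 * enable recording if tracing is on; stop_irqsoff_tracer() undoes
 * both.
 */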
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret = 0;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}
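
/*
 * Common init for the irqsoff, preemptoff and preemptirqsoff tracers:
 * force the latency output format, reset the max latency and the
 * buffers, and start tracing in the currently selected mode.
 */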
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr, is_graph());

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}
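
/*
 * .start/.stop callbacks: gate recording on and off without
 * unregistering the ftrace hooks.
 */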
static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
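
/*
 * Three flavors of the same machinery are registered below: "irqsoff"
 * (irqs-disabled sections), "preemptoff" (preemption-disabled
 * sections) and "preemptirqsoff" (either), selected via trace_type.
 */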

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);