trace_irqsoff.c

/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);
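
/*
 * Which critical-section types the shared code below is timing:
 * irqs-off, preempt-off, or both. trace_type is set once by the
 * per-flavour init functions at the bottom of this file.
 */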
enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;
#ifdef CONFIG_PREEMPT_TRACER
static inline int notrace
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int notrace
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif
/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;
#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void notrace
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (likely(!__get_cpu_var(tracing_cpu)))
		return;

	local_save_flags(flags);

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		ftrace(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */
/*
 * Should this new latency be reported/recorded?
 */
static int notrace report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
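
/*
 * Called when a critical section ends: compute how long irqs (or
 * preemption) were disabled on this CPU and, if the delta beats the
 * current maximum (or exceeds tracing_thresh), record the new max
 * trace and print where the section started and ended.
 */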
static void notrace
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, T2, delta;
	unsigned long flags;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	if (!report_latency(delta))
		goto out;

	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
	/*
	 * Update the timestamp, because the trace entry above
	 * might change it (it can only get larger so the latency
	 * is fair to be reported):
	 */
	T2 = now(cpu);
	delta = T2-T0;

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	if (tracing_thresh)
		printk(KERN_INFO "(%16s-%-5d|#%d): %lu us critical section "
		       "violates %lu us threshold.\n"
		       " => started at timestamp %lu: ",
		       current->comm, current->pid,
		       raw_smp_processor_id(),
		       latency, nsecs_to_usecs(tracing_thresh), t0);
	else
		printk(KERN_INFO "(%16s-%-5d|#%d):"
		       " new %lu us maximum-latency "
		       "critical section.\n => started at timestamp %lu: ",
		       current->comm, current->pid,
		       raw_smp_processor_id(),
		       latency, t0);

	print_symbol(KERN_CONT "<%s>\n", data->critical_start);
	printk(KERN_CONT " => ended at timestamp %lu: ", t1);
	print_symbol(KERN_CONT "<%s>\n", data->critical_end);
	dump_stack();
	t1 = nsecs_to_usecs(now(cpu));
	printk(KERN_CONT " => dump-end timestamp %lu\n\n", t1);

	max_sequence++;

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = now(cpu);
	tracing_reset(data);
	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
}
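
/*
 * Called when irqs (or preemption) are first disabled: remember the
 * timestamp and the address that started the critical section, reset
 * the per-CPU trace buffer and flag this CPU as currently tracing.
 */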
static inline void notrace
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	if (__get_cpu_var(tracing_cpu))
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!data->trace) ||
	    atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = now(cpu);
	data->critical_start = parent_ip ? : ip;
	tracing_reset(data);

	local_save_flags(flags);

	ftrace(tr, data, ip, parent_ip, flags);

	__get_cpu_var(tracing_cpu) = 1;

	atomic_dec(&data->disabled);
}
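
/*
 * Called when irqs (or preemption) are re-enabled: clear the per-CPU
 * tracing flag and let check_critical_timing() decide whether this
 * section is a new maximum worth recording.
 */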
static inline void notrace
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(__get_cpu_var(tracing_cpu)))
		__get_cpu_var(tracing_cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	cpu = raw_smp_processor_id();
	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!data->trace) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);
	local_save_flags(flags);
	ftrace(tr, data, ip, parent_ip, flags);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
/*
 * start and stop critical timings: used to suspend the measurement
 * across sections that should not count against the maximum (e.g. idle).
 */
void notrace start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}

void notrace stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
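
/*
 * Hardirq on/off hooks.  With CONFIG_PROVE_LOCKING, the hardirq
 * events reach us through lockdep's time_hardirqs_on/off() callbacks;
 * without lockdep this file provides trace_hardirqs_on/off() itself
 * and stubs out the lockdep-only entry points.
 */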
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void notrace time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void notrace time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */
void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void notrace trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void notrace trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void notrace trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void notrace trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
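
/*
 * Preempt on/off hooks: feed the same start/stop_critical_timing
 * machinery when the preemptoff flavours are in use.
 */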
#ifdef CONFIG_PREEMPT_TRACER
void notrace trace_preempt_on(unsigned long a0, unsigned long a1)
{
	stop_critical_timing(a0, a1);
}

void notrace trace_preempt_off(unsigned long a0, unsigned long a1)
{
	start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
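
/*
 * Enable/disable the tracer: flip tracer_enabled and (un)register the
 * ftrace function-trace callback defined above.
 */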
static void start_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 1;
	register_ftrace_function(&trace_ops);
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	unregister_ftrace_function(&trace_ops);
	tracer_enabled = 0;
}
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	if (tr->ctrl)
		start_irqsoff_tracer(tr);
}
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_irqsoff_tracer(tr);
	else
		stop_irqsoff_tracer(tr);
}
static void notrace irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_irqsoff_tracer(iter->tr);
}

static void notrace irqsoff_tracer_close(struct trace_iterator *iter)
{
	if (iter->tr->ctrl)
		start_irqsoff_tracer(iter->tr);
}
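
/*
 * Three flavours share the core above; each init function only selects
 * which critical-section type(s) to time before calling
 * __irqsoff_tracer_init().
 */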
#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif
#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}
static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif
#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)
static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}
static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
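
/*
 * Register whichever flavours are configured.  They show up as
 * "irqsoff", "preemptoff" and "preemptirqsoff" in the tracing debugfs
 * interface (typically selected by writing the name to current_tracer);
 * the worst observed latency is kept in tracing_max_latency.
 */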
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);