trace_irqsoff.c

/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return;

        local_save_flags(flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(flags))
                return;

        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}
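
/*
 * check_critical_timing() runs when a critical section ends: it computes
 * how long irqs/preemption were disabled on this CPU and, if that latency
 * should be reported (see report_latency() above), records the closing
 * trace entry and saves the snapshot as the new maximum under
 * max_trace_lock.
 */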
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(delta))
                goto out;

        spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tracing_max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
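
/*
 * start_critical_timing() marks the beginning of an irqs-off/preempt-off
 * section on the current CPU: it takes the start timestamp, remembers the
 * address that disabled irqs/preemption, and sets the per-cpu tracing_cpu
 * flag so irqsoff_tracer_call() records functions executed inside the
 * section.
 */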
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}
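
/*
 * stop_critical_timing() is the counterpart: it clears the per-cpu
 * tracing_cpu flag, records the closing trace entry, and hands off to
 * check_critical_timing() to decide whether this section is a new
 * maximum latency.
 */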
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/*
 * start_critical_timings/stop_critical_timings suspend the measurement
 * across sections that should not count as latency (e.g. the idle loop).
 */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
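
/*
 * Hooking into hardirq on/off events: when CONFIG_PROVE_LOCKING is set,
 * lockdep already provides trace_hardirqs_on/off() and calls back into
 * time_hardirqs_on/off() below; without lockdep, this file supplies the
 * trace_hardirqs_*() entry points (and empty stubs for the rest) itself.
 */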
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
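
/*
 * Tracer lifecycle: __irqsoff_tracer_init() switches on the latency
 * output format, resets the per-cpu buffers and registers the function
 * tracer callback; the reset/start/stop callbacks below undo or toggle
 * that when the tracer is deselected or tracing is paused.
 */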
static void start_irqsoff_tracer(struct trace_array *tr)
{
        register_ftrace_function(&trace_ops);
        if (tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
        save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
        trace_flags |= TRACE_ITER_LATENCY_FMT;

        tracing_max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(tr);
        start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        stop_irqsoff_tracer(tr);

        if (!save_lat_flag)
                trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);
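
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
 * relevant CONFIG_*_TRACER options are enabled):
 *
 *   echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *   echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *   ... run the workload ...
 *   cat /sys/kernel/debug/tracing/tracing_max_latency
 *   cat /sys/kernel/debug/tracing/trace
 *
 * The "preemptoff" and "preemptirqsoff" tracers are selected the same way.
 */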