trace_sched_wakeup.c

/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>

#include "trace.h"
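
/*
 * wakeup_task, wakeup_cpu and wakeup_prio describe the wakeup currently
 * being traced and are protected by wakeup_lock. tracer_enabled is read
 * locklessly on the hot paths; it pairs with the smp_wmb()/smp_rmb()
 * barriers below.
 */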
static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static unsigned			wakeup_prio = -1;

static DEFINE_SPINLOCK(wakeup_lock);

static void notrace __wakeup_reset(struct trace_array *tr);

/*
 * Should this new latency be reported/recorded?
 */
static int notrace report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
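
/*
 * Called from the scheduler's context-switch path. If the task being
 * switched in is the one whose wakeup we are timing, compute the
 * wakeup-to-scheduled latency and record it when report_latency()
 * above says it beats the current maximum (or violates the threshold).
 */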
void notrace
wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
{
	unsigned long latency = 0, t0 = 0, t1 = 0;
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* The task we are waiting for is waking up */
	data = tr->data[wakeup_cpu];

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	spin_lock_irqsave(&wakeup_lock, flags);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	ftrace(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags);

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	latency = nsecs_to_usecs(delta);

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	update_max_tr(tr, wakeup_task, wakeup_cpu);

	if (tracing_thresh) {
		printk(KERN_INFO "(%16s-%-5d|#%d): %lu us wakeup latency "
		       "violates %lu us threshold.\n"
		       " => started at timestamp %lu: ",
		       wakeup_task->comm, wakeup_task->pid,
		       raw_smp_processor_id(),
		       latency, nsecs_to_usecs(tracing_thresh), t0);
	} else {
		printk(KERN_INFO "(%16s-%-5d|#%d): new %lu us maximum "
		       "wakeup latency.\n => started at timestamp %lu: ",
		       wakeup_task->comm, wakeup_task->pid,
		       cpu, latency, t0);
	}

	printk(KERN_CONT " ended at timestamp %lu: ", t1);
	dump_stack();
	t1 = nsecs_to_usecs(now(cpu));
	printk(KERN_CONT " dump-end timestamp %lu\n\n", t1);

out_unlock:
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}
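
/*
 * Reset the per-CPU trace buffers and drop our reference to the traced
 * task. Callers must already hold wakeup_lock (see the assert below).
 */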
static void notrace __wakeup_reset(struct trace_array *tr)
{
	struct trace_array_cpu *data;
	int cpu;

	assert_spin_locked(&wakeup_lock);

	for_each_possible_cpu(cpu) {
		data = tr->data[cpu];
		tracing_reset(data);
	}

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void notrace wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	spin_lock_irqsave(&wakeup_lock, flags);
	__wakeup_reset(tr);
	spin_unlock_irqrestore(&wakeup_lock, flags);
}
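
/*
 * A task is waking up. Start a new trace window if it is an RT task
 * with a higher priority (lower prio value) than both the task we are
 * already tracing and the task that is currently running.
 */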
static notrace void
wakeup_check_start(struct trace_array *tr, struct task_struct *p,
		   struct task_struct *curr)
{
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;

	if (likely(!rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= curr->prio)
		return;

	disabled = atomic_inc_return(&tr->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(tr);

	wakeup_cpu = task_cpu(p);
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	tr->data[wakeup_cpu]->preempt_timestamp = now(cpu);
	ftrace(tr, tr->data[wakeup_cpu], CALLER_ADDR1, CALLER_ADDR2, flags);

out_locked:
	spin_unlock(&wakeup_lock);
out:
	atomic_dec(&tr->data[cpu]->disabled);
}
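
/*
 * Entry points called from the scheduler wakeup paths: one for ordinary
 * wakeups, one for the first wakeup of a newly forked task.
 */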
notrace void
ftrace_wake_up_task(struct task_struct *wakee, struct task_struct *curr)
{
	if (likely(!tracer_enabled))
		return;

	wakeup_check_start(wakeup_trace, wakee, curr);
}

notrace void
ftrace_wake_up_new_task(struct task_struct *wakee, struct task_struct *curr)
{
	if (likely(!tracer_enabled))
		return;

	wakeup_check_start(wakeup_trace, wakee, curr);
}

static notrace void start_wakeup_tracer(struct trace_array *tr)
{
	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	tracer_enabled = 1;
}

static notrace void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static notrace void wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_trace = tr;

	if (tr->ctrl)
		start_wakeup_tracer(tr);
}

static notrace void wakeup_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl) {
		stop_wakeup_tracer(tr);
		/* make sure we put back any tasks we are tracing */
		wakeup_reset(tr);
	}
}

static void wakeup_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_wakeup_tracer(tr);
	else
		stop_wakeup_tracer(tr);
}

static void notrace wakeup_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_wakeup_tracer(iter->tr);
}

static void notrace wakeup_tracer_close(struct trace_iterator *iter)
{
	/* forget about any processes we were recording */
	if (iter->tr->ctrl)
		start_wakeup_tracer(iter->tr);
}
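
/*
 * Wire the callbacks above into the tracing core; print_max selects the
 * saved maximum-latency snapshot when the trace is read.
 */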
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.open		= wakeup_tracer_open,
	.close		= wakeup_tracer_close,
	.ctrl_update	= wakeup_tracer_ctrl_update,
	.print_max	= 1,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);