trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static atomic_t sched_ref;
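
/*
 * Record one context-switch event in the current CPU's trace buffer.
 * The per-cpu "disabled" counter acts as a recursion guard: the entry
 * is written only when this is the sole active writer on this CPU.
 */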
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
                  struct task_struct *next)
{
        struct trace_array **ptr = private;
        struct trace_array *tr = *ptr;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_switch_trace(tr, data, prev, next, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
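
/*
 * Marker probe callback for the "kernel_sched_schedule" marker. The
 * vararg list follows the format string registered below; the leading
 * pid/state fields are skipped and only the rq/prev/next pointers are
 * consumed.
 */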
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
                      const char *format, va_list *args)
{
        struct task_struct *prev;
        struct task_struct *next;
        struct rq *__rq;

        if (!atomic_read(&sched_ref))
                return;

        /* skip prev_pid %d next_pid %d prev_state %ld */
        (void)va_arg(*args, int);
        (void)va_arg(*args, int);
        (void)va_arg(*args, long);
        __rq = va_arg(*args, typeof(__rq));
        prev = va_arg(*args, typeof(prev));
        next = va_arg(*args, typeof(next));

        tracing_record_cmdline(prev);

        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
        sched_switch_func(probe_data, __rq, prev, next);
}
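
/*
 * Record a wakeup event (wakee woken while curr was running) in the
 * current CPU's trace buffer, with the same recursion guard as
 * sched_switch_func().
 */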
static void
wakeup_func(void *private, void *__rq, struct task_struct *wakee,
            struct task_struct *curr)
{
        struct trace_array **ptr = private;
        struct trace_array *tr = *ptr;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(curr);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
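
/*
 * Marker probe callback for the "kernel_sched_wakeup" and
 * "kernel_sched_wakeup_new" markers: pull the runqueue, the woken
 * task and the currently running task out of the vararg list and
 * hand them to wakeup_func().
 */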
static notrace void
wake_up_callback(void *probe_data, void *call_data,
                 const char *format, va_list *args)
{
        struct task_struct *curr;
        struct task_struct *task;
        struct rq *__rq;

        if (likely(!tracer_enabled))
                return;

        /* Skip pid %d state %ld */
        (void)va_arg(*args, int);
        (void)va_arg(*args, long);
        /* now get the meat: "rq %p task %p rq->curr %p" */
        __rq = va_arg(*args, typeof(__rq));
        task = va_arg(*args, typeof(task));
        curr = va_arg(*args, typeof(curr));

        tracing_record_cmdline(task);
        tracing_record_cmdline(curr);

        wakeup_func(probe_data, __rq, task, curr);
}
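
/*
 * Record three arbitrary values as a "special" entry in the current
 * CPU's trace buffer; a hook for ad-hoc debugging from elsewhere in
 * the kernel.
 */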
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                __trace_special(tr, data, arg1, arg2, arg3);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
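
/*
 * Reset the per-cpu trace buffers of all online CPUs and restart the
 * trace clock for this trace_array.
 */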
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
}
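
/*
 * Register the probe callbacks on the three scheduler markers. If a
 * later registration fails, the markers registered so far are torn
 * down again before the error is returned.
 */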
static int tracing_sched_register(void)
{
        int ret;

        ret = marker_probe_register("kernel_sched_wakeup",
                        "pid %d state %ld ## rq %p task %p rq->curr %p",
                        wake_up_callback,
                        &ctx_trace);
        if (ret) {
                pr_info("wakeup trace: Couldn't add marker"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = marker_probe_register("kernel_sched_wakeup_new",
                        "pid %d state %ld ## rq %p task %p rq->curr %p",
                        wake_up_callback,
                        &ctx_trace);
        if (ret) {
                pr_info("wakeup trace: Couldn't add marker"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = marker_probe_register("kernel_sched_schedule",
                        "prev_pid %d next_pid %d prev_state %ld "
                        "## rq %p prev %p next %p",
                        sched_switch_callback,
                        &ctx_trace);
        if (ret) {
                pr_info("sched trace: Couldn't add marker"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        marker_probe_unregister("kernel_sched_wakeup_new",
                                wake_up_callback,
                                &ctx_trace);
fail_deprobe:
        marker_probe_unregister("kernel_sched_wakeup",
                                wake_up_callback,
                                &ctx_trace);
        return ret;
}

static void tracing_sched_unregister(void)
{
        marker_probe_unregister("kernel_sched_schedule",
                                sched_switch_callback,
                                &ctx_trace);
        marker_probe_unregister("kernel_sched_wakeup_new",
                                wake_up_callback,
                                &ctx_trace);
        marker_probe_unregister("kernel_sched_wakeup",
                                wake_up_callback,
                                &ctx_trace);
}
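
/*
 * Reference-counted enabling of the scheduler marker probes. The
 * probes are registered when sched_ref goes from 0 to 1 and
 * unregistered again when atomic_dec_and_test() drops it back to 0.
 */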
void tracing_start_sched_switch(void)
{
        long ref;

        ref = atomic_inc_return(&sched_ref);
        if (ref == 1)
                tracing_sched_register();
}

void tracing_stop_sched_switch(void)
{
        long ref;

        ref = atomic_dec_and_test(&sched_ref);
        if (ref)
                tracing_sched_unregister();
}
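
/*
 * Start/stop helpers used by the tracer hooks: reset the buffers,
 * toggle cmdline recording and tracer_enabled, and take or drop the
 * sched_ref reference on the marker probes.
 */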
static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        atomic_inc(&trace_record_cmdline_enabled);
        tracer_enabled = 1;
        tracing_start_sched_switch();
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracing_stop_sched_switch();
        atomic_dec(&trace_record_cmdline_enabled);
        tracer_enabled = 0;
}
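
/*
 * Hooks wired into the sched_switch tracer definition below. tr->ctrl
 * reflects whether tracing is currently enabled from user space, so
 * init/reset/ctrl_update only start or stop tracing accordingly.
 */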
static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}

static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};
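
/*
 * If tracing_start_sched_switch() was already called before this
 * initcall ran, the marker probes still have to be registered here;
 * then register the tracer itself.
 */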
__init static int init_sched_switch_trace(void)
{
        int ret = 0;

        if (atomic_read(&sched_ref))
                ret = tracing_sched_register();
        if (ret) {
                pr_info("error registering scheduler trace\n");
                return ret;
        }

        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);