trace_sched_switch.c 5.0 KB

  1. /*
  2. * trace context switch
  3. *
  4. * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
  5. *
  6. */
  7. #include <linux/module.h>
  8. #include <linux/fs.h>
  9. #include <linux/debugfs.h>
  10. #include <linux/kallsyms.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/ftrace.h>
  13. #include <trace/sched.h>
  14. #include "trace.h"
/* Trace array that context-switch and wakeup events are recorded into. */
static struct trace_array	*ctx_trace;
/* Count of tracers currently recording events (see *_record() below). */
static int __read_mostly	tracer_enabled;
/* Reference count of users of the sched tracepoint probes. */
static int			sched_ref;
/* Serializes probe (un)registration and the two counters above. */
static DEFINE_MUTEX(sched_register_mutex);
/* Set by sched_switch_trace_stop() to suppress event recording. */
static int			sched_stopped;
  20. static void
  21. probe_sched_switch(struct rq *__rq, struct task_struct *prev,
  22. struct task_struct *next)
  23. {
  24. struct trace_array_cpu *data;
  25. unsigned long flags;
  26. int cpu;
  27. int pc;
  28. if (!sched_ref || sched_stopped)
  29. return;
  30. tracing_record_cmdline(prev);
  31. tracing_record_cmdline(next);
  32. if (!tracer_enabled)
  33. return;
  34. pc = preempt_count();
  35. local_irq_save(flags);
  36. cpu = raw_smp_processor_id();
  37. data = ctx_trace->data[cpu];
  38. if (likely(!atomic_read(&data->disabled)))
  39. tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
  40. local_irq_restore(flags);
  41. }
  42. static void
  43. probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
  44. {
  45. struct trace_array_cpu *data;
  46. unsigned long flags;
  47. int cpu, pc;
  48. if (!likely(tracer_enabled))
  49. return;
  50. pc = preempt_count();
  51. tracing_record_cmdline(current);
  52. local_irq_save(flags);
  53. cpu = raw_smp_processor_id();
  54. data = ctx_trace->data[cpu];
  55. if (likely(!atomic_read(&data->disabled)))
  56. tracing_sched_wakeup_trace(ctx_trace, wakee, current,
  57. flags, pc);
  58. local_irq_restore(flags);
  59. }
/*
 * Register the wakeup, wakeup_new and switch tracepoint probes, in
 * that order. On failure, probes registered so far are unwound in
 * reverse order via the fail_* labels.
 *
 * Returns 0 on success or the tracepoint registration error code.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;

fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}
/*
 * Unregister all three probes, in the reverse order of
 * tracing_sched_register().
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
  94. static void tracing_start_sched_switch(void)
  95. {
  96. mutex_lock(&sched_register_mutex);
  97. if (!(sched_ref++))
  98. tracing_sched_register();
  99. mutex_unlock(&sched_register_mutex);
  100. }
  101. static void tracing_stop_sched_switch(void)
  102. {
  103. mutex_lock(&sched_register_mutex);
  104. if (!(--sched_ref))
  105. tracing_sched_unregister();
  106. mutex_unlock(&sched_register_mutex);
  107. }
/*
 * Enable pid -> comm recording: takes a probe reference so the
 * probes run (and call tracing_record_cmdline()) even when no
 * tracer has events enabled.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
/* Disable pid -> comm recording: drops the probe reference. */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
  116. /**
  117. * tracing_start_sched_switch_record - start tracing context switches
  118. *
  119. * Turns on context switch tracing for a tracer.
  120. */
  121. void tracing_start_sched_switch_record(void)
  122. {
  123. if (unlikely(!ctx_trace)) {
  124. WARN_ON(1);
  125. return;
  126. }
  127. tracing_start_sched_switch();
  128. mutex_lock(&sched_register_mutex);
  129. tracer_enabled++;
  130. mutex_unlock(&sched_register_mutex);
  131. }
/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	/* going negative means an unbalanced stop vs. start */
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
/* Helper for ->reset: stop recording for this tracer instance. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}
/*
 * tracer ->init callback: adopt @tr as the destination trace array,
 * clear any stale per-cpu data, and start recording. Always returns 0.
 */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}
/* tracer ->reset callback: stop recording if the probes are in use. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}
/* tracer ->start callback: re-allow the probes to record events. */
static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}
/* tracer ->stop callback: make the probes bail out early. */
static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}
/* The "sched_switch" tracer as registered with the ftrace core. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
  193. __init static int init_sched_switch_trace(void)
  194. {
  195. return register_tracer(&sched_switch_trace);
  196. }
  197. device_initcall(init_sched_switch_trace);