trace_sched_switch.c
  1. /*
  2. * trace context switch
  3. *
  4. * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
  5. *
  6. */
  7. #include <linux/module.h>
  8. #include <linux/fs.h>
  9. #include <linux/debugfs.h>
  10. #include <linux/kallsyms.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/ftrace.h>
  13. #include <trace/sched.h>
  14. #include "trace.h"
/* Trace array the probes record into; set via assign_trace or ->init. */
static struct trace_array *ctx_trace;
/* Per-tracer enable refcount; probes only emit entries when non-zero. */
static int __read_mostly tracer_enabled;
/* Refcount of users that need the sched tracepoint probes registered. */
static int sched_ref;
/* Serializes updates to sched_ref and tracer_enabled. */
static DEFINE_MUTEX(sched_register_mutex);
  19. static void
  20. probe_sched_switch(struct rq *__rq, struct task_struct *prev,
  21. struct task_struct *next)
  22. {
  23. struct trace_array_cpu *data;
  24. unsigned long flags;
  25. int cpu;
  26. int pc;
  27. if (!sched_ref)
  28. return;
  29. tracing_record_cmdline(prev);
  30. tracing_record_cmdline(next);
  31. if (!tracer_enabled)
  32. return;
  33. pc = preempt_count();
  34. local_irq_save(flags);
  35. cpu = raw_smp_processor_id();
  36. data = ctx_trace->data[cpu];
  37. if (likely(!atomic_read(&data->disabled)))
  38. tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
  39. local_irq_restore(flags);
  40. }
  41. static void
  42. probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
  43. {
  44. struct trace_array_cpu *data;
  45. unsigned long flags;
  46. int cpu, pc;
  47. if (!likely(tracer_enabled))
  48. return;
  49. pc = preempt_count();
  50. tracing_record_cmdline(current);
  51. local_irq_save(flags);
  52. cpu = raw_smp_processor_id();
  53. data = ctx_trace->data[cpu];
  54. if (likely(!atomic_read(&data->disabled)))
  55. tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
  56. flags, pc);
  57. local_irq_restore(flags);
  58. }
  59. static int tracing_sched_register(void)
  60. {
  61. int ret;
  62. ret = register_trace_sched_wakeup(probe_sched_wakeup);
  63. if (ret) {
  64. pr_info("wakeup trace: Couldn't activate tracepoint"
  65. " probe to kernel_sched_wakeup\n");
  66. return ret;
  67. }
  68. ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
  69. if (ret) {
  70. pr_info("wakeup trace: Couldn't activate tracepoint"
  71. " probe to kernel_sched_wakeup_new\n");
  72. goto fail_deprobe;
  73. }
  74. ret = register_trace_sched_switch(probe_sched_switch);
  75. if (ret) {
  76. pr_info("sched trace: Couldn't activate tracepoint"
  77. " probe to kernel_sched_schedule\n");
  78. goto fail_deprobe_wake_new;
  79. }
  80. return ret;
  81. fail_deprobe_wake_new:
  82. unregister_trace_sched_wakeup_new(probe_sched_wakeup);
  83. fail_deprobe:
  84. unregister_trace_sched_wakeup(probe_sched_wakeup);
  85. return ret;
  86. }
/*
 * Detach the probes installed by tracing_sched_register(), in the
 * reverse order of their registration.
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
  93. static void tracing_start_sched_switch(void)
  94. {
  95. mutex_lock(&sched_register_mutex);
  96. if (!(sched_ref++))
  97. tracing_sched_register();
  98. mutex_unlock(&sched_register_mutex);
  99. }
  100. static void tracing_stop_sched_switch(void)
  101. {
  102. mutex_lock(&sched_register_mutex);
  103. if (!(--sched_ref))
  104. tracing_sched_unregister();
  105. mutex_unlock(&sched_register_mutex);
  106. }
/*
 * Start recording pid -> comm mappings: takes a probe reference so the
 * sched probes run and call tracing_record_cmdline().
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
/*
 * Stop recording pid -> comm mappings: drops the probe reference taken
 * by tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
  115. /**
  116. * tracing_start_sched_switch_record - start tracing context switches
  117. *
  118. * Turns on context switch tracing for a tracer.
  119. */
  120. void tracing_start_sched_switch_record(void)
  121. {
  122. if (unlikely(!ctx_trace)) {
  123. WARN_ON(1);
  124. return;
  125. }
  126. tracing_start_sched_switch();
  127. mutex_lock(&sched_register_mutex);
  128. tracer_enabled++;
  129. mutex_unlock(&sched_register_mutex);
  130. }
  131. /**
  132. * tracing_stop_sched_switch_record - start tracing context switches
  133. *
  134. * Turns off context switch tracing for a tracer.
  135. */
  136. void tracing_stop_sched_switch_record(void)
  137. {
  138. mutex_lock(&sched_register_mutex);
  139. tracer_enabled--;
  140. WARN_ON(tracer_enabled < 0);
  141. mutex_unlock(&sched_register_mutex);
  142. tracing_stop_sched_switch();
  143. }
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;	/* read by the probes on every switch/wakeup */
}
/* Reset @tr's per-cpu buffers, then enable context switch recording. */
static void start_sched_trace(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
}
/* Disable context switch recording; @tr itself needs no teardown. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}
/*
 * Tracer ->init callback: remember the trace array and start tracing.
 * Always succeeds (returns 0).
 */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	start_sched_trace(tr);
	return 0;
}
/* Tracer ->reset callback: stop tracing if a reference is still held. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}
/* Tracer ->start callback: clear buffers and take a probe reference. */
static void sched_switch_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch();
}
/* Tracer ->stop callback: drop the probe reference taken by ->start. */
static void sched_switch_trace_stop(struct trace_array *tr)
{
	tracing_stop_sched_switch();
}
/* The "sched_switch" tracer definition registered with ftrace. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
  196. __init static int init_sched_switch_trace(void)
  197. {
  198. return register_tracer(&sched_switch_trace);
  199. }
  200. device_initcall(init_sched_switch_trace);