trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

/* trace array the context-switch events are recorded into */
static struct trace_array *ctx_trace;
/* non-zero while at least one tracer wants events written to the buffer */
static int __read_mostly tracer_enabled;
/* reference count of users of the tracepoint probes below */
static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
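
/*
 * Tracepoint probe: called on every context switch.  It always records
 * the prev/next command lines (for PID -> comm resolution) and, when a
 * tracer has enabled recording, writes the switch event into this
 * CPU's trace buffer.
 */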
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                   struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (!sched_ref)
                return;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        /* skip recording while this CPU's buffer is disabled */
        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_switch_trace(ctx_trace, data, prev, next,
                                           flags, pc);

        local_irq_restore(flags);
}
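
/*
 * Tracepoint probe: called when a task is woken up.  Records a wakeup
 * event (wakee woken by current) into this CPU's trace buffer.
 */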
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu, pc;

        if (unlikely(!tracer_enabled))
                return;

        pc = preempt_count();
        tracing_record_cmdline(current);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
                                           flags, pc);

        local_irq_restore(flags);
}
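
/*
 * Reset the trace array: restart the time base and clear each online
 * CPU's buffer so a new trace starts from a clean state.
 */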
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
}
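
/*
 * Attach the probes above to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints.  On failure, any probe registered so far
 * is unregistered again before the error is returned.
 */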
static int tracing_sched_register(void)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;

fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_sched_wakeup);
        return ret;
}
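
/* Detach the probes in the reverse order they were registered. */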
static void tracing_sched_unregister(void)
{
        unregister_trace_sched_switch(probe_sched_switch);
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
        unregister_trace_sched_wakeup(probe_sched_wakeup);
}
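
/*
 * Reference-counted start/stop: the tracepoint probes are registered
 * for the first user and unregistered again when the last user goes
 * away, so several tracers can share them.
 */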
static void tracing_start_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(sched_ref++))
                tracing_sched_register();
        mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(--sched_ref))
                tracing_sched_unregister();
        mutex_unlock(&sched_register_mutex);
}
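
/*
 * Command-line recording only needs the probes to run (so that
 * tracing_record_cmdline() gets called); it does not touch
 * tracer_enabled, so no events are written to the buffer.
 */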
void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
        if (unlikely(!ctx_trace)) {
                WARN_ON(1);
                return;
        }

        tracing_start_sched_switch();

        mutex_lock(&sched_register_mutex);
        tracer_enabled++;
        mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
        mutex_lock(&sched_register_mutex);
        tracer_enabled--;
        WARN_ON(tracer_enabled < 0);
        mutex_unlock(&sched_register_mutex);

        tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
        ctx_trace = tr;
}
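
/*
 * A minimal sketch of how another tracer could use the two calls
 * above; "my_tracer_init" is a hypothetical callback made up for
 * illustration:
 *
 *      static void my_tracer_init(struct trace_array *tr)
 *      {
 *              tracing_sched_switch_assign_trace(tr);
 *              tracing_start_sched_switch_record();
 *      }
 */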

static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracing_start_sched_switch_record();
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracing_stop_sched_switch_record();
}
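
/*
 * Callbacks for the sched_switch tracer plugin defined below.
 */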
static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl && sched_ref)
                stop_sched_trace(tr);
}

static void sched_switch_trace_start(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracing_start_sched_switch();
}

static void sched_switch_trace_stop(struct trace_array *tr)
{
        tracing_stop_sched_switch();
}
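
/*
 * The "sched_switch" tracer plugin itself; registered with the
 * tracing core at boot via the initcall below.
 */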
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .start          = sched_switch_trace_start,
        .stop           = sched_switch_trace_stop,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};

static int __init init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);