trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"
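
/*
 * Tracer state shared by the probes below: ctx_trace is the trace array
 * the probes write into, tracer_enabled gates whether switch/wakeup
 * entries are actually recorded, sched_ref counts users of the tracepoint
 * probes (the tracer itself plus anyone who only wants cmdline recording),
 * and tracepoint_mutex serializes probe registration.
 */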
static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;
static DEFINE_MUTEX(tracepoint_mutex);
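
/*
 * Probe attached to the sched_switch tracepoint.  While anyone holds a
 * sched_ref reference it records the command names of the outgoing and
 * incoming tasks; a context-switch entry is only logged into this CPU's
 * buffer when the tracer itself is enabled and the buffer is not disabled.
 */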
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (!atomic_read(&sched_ref))
                return;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

        local_irq_restore(flags);
}
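
/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints.
 * It records a wakeup entry for the woken task, with the currently
 * running task as the waker.
 */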
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu, pc;

        if (!likely(tracer_enabled))
                return;

        pc = preempt_count();
        tracing_record_cmdline(current);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
                                           flags, pc);

        local_irq_restore(flags);
}
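
/*
 * Reset the per-cpu buffers of this trace array and restamp its start time.
 */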
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
}
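
/*
 * Attach the probes to the sched_wakeup, sched_wakeup_new and sched_switch
 * tracepoints.  If any registration fails, the probes that were already
 * attached are removed again before the error is returned.
 */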
static int tracing_sched_register(void)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_sched_wakeup);
        return ret;
}

static void tracing_sched_unregister(void)
{
        unregister_trace_sched_switch(probe_sched_switch);
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
        unregister_trace_sched_wakeup(probe_sched_wakeup);
}
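
/*
 * Reference-counted enabling of the tracepoint probes: the first caller of
 * tracing_start_sched_switch() registers the probes, and they are
 * unregistered again only when the last reference is dropped
 * (atomic_dec_and_test() returns true when sched_ref reaches zero).
 */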
static void tracing_start_sched_switch(void)
{
        long ref;

        mutex_lock(&tracepoint_mutex);
        ref = atomic_inc_return(&sched_ref);
        if (ref == 1)
                tracing_sched_register();
        mutex_unlock(&tracepoint_mutex);
}

static void tracing_stop_sched_switch(void)
{
        long ref;

        mutex_lock(&tracepoint_mutex);
        ref = atomic_dec_and_test(&sched_ref);
        if (ref)
                tracing_sched_unregister();
        mutex_unlock(&tracepoint_mutex);
}
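
/*
 * Entry points that let other tracers piggyback on the sched tracepoints
 * purely for pid-to-comm recording: they take a sched_ref reference so the
 * probes run and call tracing_record_cmdline() even while tracer_enabled
 * is zero.  As a sketch (not code from this file), another tracer would
 * typically call tracing_start_cmdline_record() from its init hook and
 * tracing_stop_cmdline_record() from its reset hook.
 */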
void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}
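
/*
 * Start/stop the sched_switch tracer proper: reset the buffers, grab a
 * reference on the tracepoint probes, and only then flip tracer_enabled.
 * Stopping clears tracer_enabled first, so the probes quit logging before
 * the reference is dropped.
 */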
static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracing_start_cmdline_record();
        tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracer_enabled = 0;
        tracing_stop_cmdline_record();
}
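
/*
 * Hooks invoked by the tracing core: init/reset when this tracer is
 * selected or deselected as the current tracer, ctrl_update when the
 * tracing control file toggles tr->ctrl.
 */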
static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}

static struct tracer sched_switch_trace __read_mostly =
{
        .name		= "sched_switch",
        .init		= sched_switch_trace_init,
        .reset		= sched_switch_trace_reset,
        .ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest	= trace_selftest_startup_sched_switch,
#endif
};
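
/*
 * Boot-time registration of the tracer.  If someone already asked for
 * cmdline recording before this initcall ran (sched_ref is non-zero),
 * attach the tracepoint probes now as well.
 */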
__init static int init_sched_switch_trace(void)
{
        int ret = 0;

        if (atomic_read(&sched_ref))
                ret = tracing_sched_register();
        if (ret) {
                pr_info("error registering scheduler trace\n");
                return ret;
        }
        return register_tracer(&sched_switch_trace);
}

device_initcall(init_sched_switch_trace);