trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
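
/*
 * probe_sched_switch - probe attached to the sched_switch tracepoint.
 * It runs on every context switch: it records the command lines of
 * the outgoing and incoming tasks and, if the tracer is enabled,
 * writes a context-switch entry into this CPU's buffer with
 * interrupts disabled.
 */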
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                   struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (!sched_ref)
                return;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

        local_irq_restore(flags);
}
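
/*
 * probe_sched_wakeup - probe attached to the wakeup tracepoints.
 * Records the waker's command line and, if the tracer is enabled,
 * writes a wakeup entry (the wakee plus current, the task doing the
 * waking) into this CPU's buffer.
 */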
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu, pc;

        if (!likely(tracer_enabled))
                return;

        pc = preempt_count();
        tracing_record_cmdline(current);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
                                           flags, pc);

        local_irq_restore(flags);
}
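
/*
 * sched_switch_reset - start a fresh trace: note the new start time
 * and clear the ring buffer of every online CPU.
 */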
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
}
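
/*
 * tracing_sched_register - hook the probes into the scheduler
 * tracepoints (wakeup, wakeup_new and switch).  If a later
 * registration fails, the earlier ones are rolled back before the
 * error is returned.
 */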
static int tracing_sched_register(void)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;

fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_sched_wakeup);
        return ret;
}
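
/* Mirror image of tracing_sched_register(): detach all three probes. */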
static void tracing_sched_unregister(void)
{
        unregister_trace_sched_switch(probe_sched_switch);
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
        unregister_trace_sched_wakeup(probe_sched_wakeup);
}
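
/*
 * Reference-counted enable/disable.  The probes are registered when
 * the first user shows up and unregistered when the last one goes
 * away; sched_register_mutex serializes the updates of sched_ref.
 */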
static void tracing_start_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(sched_ref++)) {
                tracer_enabled = 1;
                tracing_sched_register();
        }
        mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(--sched_ref)) {
                tracing_sched_unregister();
                tracer_enabled = 0;
        }
        mutex_unlock(&sched_register_mutex);
}
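
/*
 * Command-line recording (the pid -> comm mapping shown in the trace
 * output) piggybacks on the scheduler probes, so starting and
 * stopping it just takes and drops a reference on them.
 */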
void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}
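
/*
 * Helpers for the tracer callbacks below: start_sched_trace resets
 * the buffers and takes a reference on the scheduler probes;
 * stop_sched_trace drops that reference again.
 */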
static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracing_start_cmdline_record();
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
}
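
/*
 * Callbacks invoked by the ftrace core: init/reset when the tracer
 * is selected or dropped, ctrl_update when tracing is switched on or
 * off (tr->ctrl).
 */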
static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl && sched_ref)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}
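
/* The sched_switch tracer itself, handed to the core via register_tracer(). */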
struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};
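
/* Register the tracer with the ftrace core during boot. */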
__init static int init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);