trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static atomic_t sched_ref;
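
/*
 * Probe attached to the sched_switch tracepoint: records the prev/next
 * command lines while anyone holds a sched_ref, and when the tracer
 * itself is enabled writes a context-switch entry into this CPU's
 * buffer with interrupts disabled.
 */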
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (!atomic_read(&sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

	local_irq_restore(flags);
}
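
/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints:
 * when the tracer is enabled, records current's command line and logs
 * a wakeup entry for @wakee into this CPU's buffer.
 */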
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (!likely(tracer_enabled))
		return;

	pc = preempt_count();
	tracing_record_cmdline(current);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}

static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}
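
/*
 * Hook the probes into the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints, unregistering what was already attached
 * if a later registration fails.
 */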
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
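
/*
 * sched_ref counts the users of the probes. The tracepoints are
 * registered when the first reference is taken and unregistered when
 * the last one is dropped (atomic_dec_and_test() returns non-zero
 * only when the count hits zero).
 */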
static void tracing_start_sched_switch(void)
{
	long ref;

	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
}

static void tracing_stop_sched_switch(void)
{
	long ref;

	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
}
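
/*
 * Entry points for tracers that only need pid -> comm recording: they
 * take a reference on the sched probes without setting tracer_enabled,
 * so only tracing_record_cmdline() runs in the probes.
 */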
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}

static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}
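
/*
 * The "sched_switch" tracer plugin. init/reset start and stop tracing
 * according to tr->ctrl; ctrl_update starts (resetting the buffers)
 * or stops tracing when the control flag is flipped.
 */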
static struct tracer sched_switch_trace __read_mostly =
{
	.name = "sched_switch",
	.init = sched_switch_trace_init,
	.reset = sched_switch_trace_reset,
	.ctrl_update = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}

	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);