trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;
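
/*
 * Probe attached to the sched_switch tracepoint.  Command lines are
 * recorded whenever sched_ref is held, even while the tracer itself
 * is disabled, so that pure cmdline recording keeps working.  The
 * per-cpu data->disabled counter guards against nested entry on the
 * same CPU: the event is only written when this probe is the sole
 * writer there.
 */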
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!atomic_read(&sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
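
/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints.
 * Same pattern as probe_sched_switch(): record the current command
 * line, then log the wakeup with interrupts off and the per-cpu
 * disabled counter raised.
 */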
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!likely(tracer_enabled))
		return;

	tracing_record_cmdline(current);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
					   flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}
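
/*
 * Attach the probes to the sched_wakeup, sched_wakeup_new and
 * sched_switch tracepoints.  If any registration fails, the ones
 * already made are unwound in reverse order.
 */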
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
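
/*
 * sched_ref counts the users that need the probes registered: the
 * sched_switch tracer itself plus any caller of
 * tracing_start_cmdline_record().  The probes are registered on the
 * 0 -> 1 transition and unregistered when the count returns to zero.
 */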
static void tracing_start_sched_switch(void)
{
	long ref;

	ref = atomic_inc_return(&sched_ref);
	if (ref == 1)
		tracing_sched_register();
}

static void tracing_stop_sched_switch(void)
{
	long ref;

	ref = atomic_dec_and_test(&sched_ref);
	if (ref)
		tracing_sched_unregister();
}
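
/*
 * Non-static entry points that let other parts of ftrace piggy-back
 * on the sched probes purely for command line recording, without
 * enabling the sched_switch tracer output.
 */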
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}

static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}
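
/*
 * Tracer definition handed to the ftrace core via register_tracer().
 * ctrl_update is called when the trace control state is toggled
 * (presumably from the debugfs interface); switching it on resets
 * the buffers through start_sched_trace() first.
 */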
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
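
/*
 * If cmdline recording was requested before this initcall ran
 * (sched_ref already non-zero), register the probes now, then make
 * the sched_switch tracer itself available.
 */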
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);