trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
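
/*
 * Probe for a context switch: records the switch from @prev to @next in
 * this CPU's trace buffer.  The data->disabled counter ensures that a
 * nested invocation on the same CPU does not write a second entry.
 */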
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(prev);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		tracing_sched_switch_trace(tr, data, prev, next, flags);
		if (trace_flags & TRACE_ITER_SCHED_TREE)
			ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
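
/*
 * Probe for a task wakeup: records that @curr woke up @wakee, using the
 * same per-CPU disabled-counter guard as ctx_switch_func().
 */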
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	tracing_record_cmdline(curr);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
		if (trace_flags & TRACE_ITER_SCHED_TREE)
			ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
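
/*
 * Global hooks used outside this file (presumably from the scheduler's
 * context-switch and wakeup paths, given the __rq argument).  Each one
 * records the event for this tracer and then chains to the wakeup-latency
 * tracer's handlers.
 */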
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(__rq, prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}

void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}
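
/*
 * ftrace_special() records three caller-supplied values as a special trace
 * entry, presumably intended as an ad-hoc debugging annotation.
 */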
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		__trace_special(tr, data, arg1, arg2, arg3);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
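
/* Record a fresh start time and clear every online CPU's trace buffer. */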
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}

static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}
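
/*
 * The tracer registered with the ftrace core under the name "sched_switch".
 */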
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}

device_initcall(init_sched_switch_trace);