trace_sched_switch.c

/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/marker.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
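
/*
 * Record a context switch into the per-CPU trace buffer. The
 * data->disabled counter keeps a nested event on this CPU from
 * recursing into the buffer while an entry is being written.
 */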
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(prev);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_switch_trace(tr, data, prev, next, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
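
/*
 * Record a wakeup event: @curr is the waking task, @wakee the task
 * being woken.
 */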
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        tracing_record_cmdline(curr);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
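
/*
 * Hook called by the scheduler on every context switch.
 */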
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
                  struct task_struct *next)
{
        /*
         * If tracer_switch_func only points to the local
         * switch func, it still needs the ptr passed to it.
         */
        ctx_switch_func(__rq, prev, next);

        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
         */
        wakeup_sched_switch(prev, next);
}
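
/*
 * Hook called by the scheduler when a task is woken up.
 */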
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
                    struct task_struct *curr)
{
        wakeup_func(__rq, wakee, curr);

        /*
         * Chain to the wakeup tracer (this is a NOP if disabled):
         */
        wakeup_sched_wakeup(wakee, curr);
}
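
/*
 * Record three arbitrary values as a "special" entry, handy for
 * ad hoc debugging from anywhere in the kernel.
 */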
void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
        struct trace_array *tr = ctx_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        if (!tracer_enabled)
                return;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                __trace_special(tr, data, arg1, arg2, arg3);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
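
/*
 * Clear the per-CPU buffers and restart the trace timestamp.
 */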
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr->data[cpu]);
}
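
/*
 * Enable/disable recording; starting also resets any previous trace.
 */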
static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracer_enabled = 0;
}

static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}
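
/*
 * Tracer registered with the tracing core as "sched_switch".
 */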
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .ctrl_update    = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}

device_initcall(init_sched_switch_trace);