/* trace_sched_switch.c */
  1. /*
  2. * trace context switch
  3. *
  4. * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
  5. *
  6. */
  7. #include <linux/module.h>
  8. #include <linux/fs.h>
  9. #include <linux/debugfs.h>
  10. #include <linux/kallsyms.h>
  11. #include <linux/uaccess.h>
  12. #include <linux/marker.h>
  13. #include <linux/ftrace.h>
  14. #include "trace.h"
/* Trace array this tracer writes into; set once by sched_switch_trace_init(). */
static struct trace_array *ctx_trace;

/* Nonzero while tracing is active; checked on every context switch (hot path). */
static int __read_mostly tracer_enabled;
  17. static void notrace
  18. ctx_switch_func(struct task_struct *prev, struct task_struct *next)
  19. {
  20. struct trace_array *tr = ctx_trace;
  21. struct trace_array_cpu *data;
  22. unsigned long flags;
  23. long disabled;
  24. int cpu;
  25. if (!tracer_enabled)
  26. return;
  27. local_irq_save(flags);
  28. cpu = raw_smp_processor_id();
  29. data = tr->data[cpu];
  30. disabled = atomic_inc_return(&data->disabled);
  31. if (likely(disabled == 1))
  32. tracing_sched_switch_trace(tr, data, prev, next, flags);
  33. atomic_dec(&data->disabled);
  34. local_irq_restore(flags);
  35. }
/*
 * Hook invoked by the scheduler on every context switch.
 *
 * Records the outgoing task's comm (for later pid -> name resolution),
 * emits the switch event, then chains to the wakeup-latency tracer.
 * The ordering of these three calls is deliberate.
 */
void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
{
	tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}
  49. static notrace void sched_switch_reset(struct trace_array *tr)
  50. {
  51. int cpu;
  52. tr->time_start = now(tr->cpu);
  53. for_each_online_cpu(cpu)
  54. tracing_reset(tr->data[cpu]);
  55. }
  56. static notrace void start_sched_trace(struct trace_array *tr)
  57. {
  58. sched_switch_reset(tr);
  59. tracer_enabled = 1;
  60. }
  61. static notrace void stop_sched_trace(struct trace_array *tr)
  62. {
  63. tracer_enabled = 0;
  64. }
  65. static notrace void sched_switch_trace_init(struct trace_array *tr)
  66. {
  67. ctx_trace = tr;
  68. if (tr->ctrl)
  69. start_sched_trace(tr);
  70. }
  71. static notrace void sched_switch_trace_reset(struct trace_array *tr)
  72. {
  73. if (tr->ctrl)
  74. stop_sched_trace(tr);
  75. }
  76. static void sched_switch_trace_ctrl_update(struct trace_array *tr)
  77. {
  78. /* When starting a new trace, reset the buffers */
  79. if (tr->ctrl)
  80. start_sched_trace(tr);
  81. else
  82. stop_sched_trace(tr);
  83. }
/* Registration record hooking this tracer's callbacks into the ftrace core. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name = "sched_switch",
	.init = sched_switch_trace_init,
	.reset = sched_switch_trace_reset,
	.ctrl_update = sched_switch_trace_ctrl_update,
};
  91. __init static int init_sched_switch_trace(void)
  92. {
  93. return register_tracer(&sched_switch_trace);
  94. }
  95. device_initcall(init_sched_switch_trace);