|
@@ -16,11 +16,14 @@
|
|
|
|
|
|
/* The trace_array that sched_switch/wakeup events are recorded into. */
static struct trace_array *ctx_trace;
/* Nonzero while the sched_switch tracer is actively recording. */
static int __read_mostly tracer_enabled;
/* Reference count of users that need the scheduler marker probes armed. */
static atomic_t sched_ref;
|
|
|
|
|
|
static void
|
|
|
-ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
|
|
|
+sched_switch_func(void *private, void *__rq, struct task_struct *prev,
|
|
|
+ struct task_struct *next)
|
|
|
{
|
|
|
- struct trace_array *tr = ctx_trace;
|
|
|
+ struct trace_array **ptr = private;
|
|
|
+ struct trace_array *tr = *ptr;
|
|
|
struct trace_array_cpu *data;
|
|
|
unsigned long flags;
|
|
|
long disabled;
|
|
@@ -41,10 +44,40 @@ ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
|
|
|
local_irq_restore(flags);
|
|
|
}
|
|
|
|
|
|
/*
 * Marker probe callback for the "kernel_sched_schedule" marker.
 *
 * @probe_data: private data passed at marker_probe_register() time —
 *              here a struct trace_array ** (&ctx_trace), forwarded
 *              to sched_switch_func().
 * @call_data:  per-callsite data (unused here).
 * @format:     the marker format string (unused here, but the va_arg
 *              sequence below must consume arguments in exactly the
 *              order and types it describes).
 * @args:       the marker's variadic arguments.
 *
 * NOTE(review): the va_arg() calls must stay in this exact order —
 * they mirror "prev_pid %d next_pid %d prev_state %ld ## rq %p prev %p
 * next %p"; reordering them silently corrupts every extracted value.
 */
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
		      const char *format, va_list *args)
{
	struct task_struct *prev;
	struct task_struct *next;
	struct rq *__rq;

	/* Cheap early-out when no user holds a sched_ref. */
	if (!atomic_read(&sched_ref))
		return;

	/* skip prev_pid %d next_pid %d prev_state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	__rq = va_arg(*args, typeof(__rq));
	prev = va_arg(*args, typeof(prev));
	next = va_arg(*args, typeof(next));

	/* Remember the comm of the task being switched out. */
	tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	sched_switch_func(probe_data, __rq, prev, next);
}
|
|
|
+
|
|
|
static void
|
|
|
-wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
|
|
|
+wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
|
|
|
+ task_struct *curr)
|
|
|
{
|
|
|
- struct trace_array *tr = ctx_trace;
|
|
|
+ struct trace_array **ptr = private;
|
|
|
+ struct trace_array *tr = *ptr;
|
|
|
struct trace_array_cpu *data;
|
|
|
unsigned long flags;
|
|
|
long disabled;
|
|
@@ -67,35 +100,29 @@ wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
|
|
|
local_irq_restore(flags);
|
|
|
}
|
|
|
|
|
|
/*
 * Marker probe callback for the "kernel_sched_wakeup" and
 * "kernel_sched_wakeup_new" markers.
 *
 * @probe_data: private data passed at marker_probe_register() time —
 *              here a struct trace_array ** (&ctx_trace), forwarded
 *              to wakeup_func().
 * @call_data:  per-callsite data (unused here).
 * @format:     the marker format string (unused here, but the va_arg
 *              sequence below must consume arguments in exactly the
 *              order and types it describes).
 * @args:       the marker's variadic arguments.
 *
 * NOTE(review): the va_arg() calls must stay in this exact order —
 * they mirror "pid %d state %ld ## rq %p task %p rq->curr %p".
 */
static notrace void
wake_up_callback(void *probe_data, void *call_data,
		 const char *format, va_list *args)
{
	struct task_struct *curr;
	struct task_struct *task;
	struct rq *__rq;

	/* Nothing to do while the tracer itself is disabled. */
	if (likely(!tracer_enabled))
		return;

	/* Skip pid %d state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	/* now get the meat: "rq %p task %p rq->curr %p" */
	__rq = va_arg(*args, typeof(__rq));
	task = va_arg(*args, typeof(task));
	curr = va_arg(*args, typeof(curr));

	/* Record comms for both the woken task and the current task. */
	tracing_record_cmdline(task);
	tracing_record_cmdline(curr);

	wakeup_func(probe_data, __rq, task, curr);
}
|
|
|
|
|
|
void
|
|
@@ -132,15 +159,95 @@ static void sched_switch_reset(struct trace_array *tr)
|
|
|
tracing_reset(tr->data[cpu]);
|
|
|
}
|
|
|
|
|
|
+static int tracing_sched_register(void)
|
|
|
+{
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ ret = marker_probe_register("kernel_sched_wakeup",
|
|
|
+ "pid %d state %ld ## rq %p task %p rq->curr %p",
|
|
|
+ wake_up_callback,
|
|
|
+ &ctx_trace);
|
|
|
+ if (ret) {
|
|
|
+ pr_info("wakeup trace: Couldn't add marker"
|
|
|
+ " probe to kernel_sched_wakeup\n");
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = marker_probe_register("kernel_sched_wakeup_new",
|
|
|
+ "pid %d state %ld ## rq %p task %p rq->curr %p",
|
|
|
+ wake_up_callback,
|
|
|
+ &ctx_trace);
|
|
|
+ if (ret) {
|
|
|
+ pr_info("wakeup trace: Couldn't add marker"
|
|
|
+ " probe to kernel_sched_wakeup_new\n");
|
|
|
+ goto fail_deprobe;
|
|
|
+ }
|
|
|
+
|
|
|
+ ret = marker_probe_register("kernel_sched_schedule",
|
|
|
+ "prev_pid %d next_pid %d prev_state %ld "
|
|
|
+ "## rq %p prev %p next %p",
|
|
|
+ sched_switch_callback,
|
|
|
+ &ctx_trace);
|
|
|
+ if (ret) {
|
|
|
+ pr_info("sched trace: Couldn't add marker"
|
|
|
+ " probe to kernel_sched_schedule\n");
|
|
|
+ goto fail_deprobe_wake_new;
|
|
|
+ }
|
|
|
+
|
|
|
+ return ret;
|
|
|
+fail_deprobe_wake_new:
|
|
|
+ marker_probe_unregister("kernel_sched_wakeup_new",
|
|
|
+ wake_up_callback,
|
|
|
+ &ctx_trace);
|
|
|
+fail_deprobe:
|
|
|
+ marker_probe_unregister("kernel_sched_wakeup",
|
|
|
+ wake_up_callback,
|
|
|
+ &ctx_trace);
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+
|
|
|
+static void tracing_sched_unregister(void)
|
|
|
+{
|
|
|
+ marker_probe_unregister("kernel_sched_schedule",
|
|
|
+ sched_switch_callback,
|
|
|
+ &ctx_trace);
|
|
|
+ marker_probe_unregister("kernel_sched_wakeup_new",
|
|
|
+ wake_up_callback,
|
|
|
+ &ctx_trace);
|
|
|
+ marker_probe_unregister("kernel_sched_wakeup",
|
|
|
+ wake_up_callback,
|
|
|
+ &ctx_trace);
|
|
|
+}
|
|
|
+
|
|
|
+void tracing_start_sched_switch(void)
|
|
|
+{
|
|
|
+ long ref;
|
|
|
+
|
|
|
+ ref = atomic_inc_return(&sched_ref);
|
|
|
+ if (ref == 1)
|
|
|
+ tracing_sched_register();
|
|
|
+}
|
|
|
+
|
|
|
+void tracing_stop_sched_switch(void)
|
|
|
+{
|
|
|
+ long ref;
|
|
|
+
|
|
|
+ ref = atomic_dec_and_test(&sched_ref);
|
|
|
+ if (ref)
|
|
|
+ tracing_sched_unregister();
|
|
|
+}
|
|
|
+
|
|
|
/*
 * Begin tracing into @tr: clear any stale per-cpu data, enable comm
 * recording and the tracer flag, then take a marker reference so the
 * scheduler probes get armed.
 */
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	atomic_inc(&trace_record_cmdline_enabled);
	/* Set the flag before arming the markers. */
	tracer_enabled = 1;
	tracing_start_sched_switch();
}
|
|
|
|
|
|
/*
 * Stop tracing into @tr: drop the marker reference first (possibly
 * disarming the probes), then disable comm recording and the tracer
 * flag — the mirror image of start_sched_trace().
 */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch();
	atomic_dec(&trace_record_cmdline_enabled);
	tracer_enabled = 0;
}
|
|
@@ -181,6 +288,14 @@ static struct tracer sched_switch_trace __read_mostly =
|
|
|
|
|
|
__init static int init_sched_switch_trace(void)
|
|
|
{
|
|
|
+ int ret = 0;
|
|
|
+
|
|
|
+ if (atomic_read(&sched_ref))
|
|
|
+ ret = tracing_sched_register();
|
|
|
+ if (ret) {
|
|
|
+ pr_info("error registering scheduler trace\n");
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
return register_tracer(&sched_switch_trace);
|
|
|
}
|
|
|
device_initcall(init_sched_switch_trace);
|