@@ -20,6 +20,34 @@ static int sched_ref;
 static DEFINE_MUTEX(sched_register_mutex);
 static int sched_stopped;
 
+
+void
+tracing_sched_switch_trace(struct trace_array *tr,
+			   struct task_struct *prev,
+			   struct task_struct *next,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_context_switch;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_CTX,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->prev_pid = prev->pid;
+	entry->prev_prio = prev->prio;
+	entry->prev_state = prev->state;
+	entry->next_pid = next->pid;
+	entry->next_prio = next->prio;
+	entry->next_state = next->state;
+	entry->next_cpu = task_cpu(next);
+
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		trace_buffer_unlock_commit(tr, event, flags, pc);
+}
+
 static void
 probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 		   struct task_struct *next)
@@ -49,6 +77,35 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	local_irq_restore(flags);
 }
 
+void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+			   struct task_struct *wakee,
+			   struct task_struct *curr,
+			   unsigned long flags, int pc)
+{
+	struct ftrace_event_call *call = &event_wakeup;
+	struct ring_buffer_event *event;
+	struct ctx_switch_entry *entry;
+
+	event = trace_buffer_lock_reserve(tr, TRACE_WAKE,
+					  sizeof(*entry), flags, pc);
+	if (!event)
+		return;
+	entry = ring_buffer_event_data(event);
+	entry->prev_pid = curr->pid;
+	entry->prev_prio = curr->prio;
+	entry->prev_state = curr->state;
+	entry->next_pid = wakee->pid;
+	entry->next_prio = wakee->prio;
+	entry->next_state = wakee->state;
+	entry->next_cpu = task_cpu(wakee);
+
+	if (!filter_check_discard(call, entry, tr->buffer, event))
+		ring_buffer_unlock_commit(tr->buffer, event);
+	ftrace_trace_stack(tr, flags, 6, pc);
+	ftrace_trace_userstack(tr, flags, pc);
+}
+
 static void
 probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
 {
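
Both hunks fill in the same struct ctx_switch_entry record before committing it to the ring buffer. For reference, here is a minimal sketch of that layout, inferred from the assignments above; the real definition lives in kernel/trace/trace.h, begins with the common struct trace_entry header, and its exact field widths may differ.

/*
 * Sketch of the record written by tracing_sched_switch_trace() and
 * tracing_sched_wakeup_trace(). Field names match the assignments in
 * the hunks above; the embedded header and integer widths are assumptions.
 */
struct ctx_switch_entry {
	struct trace_entry	ent;		/* common ftrace event header (assumed) */
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};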