@@ -71,6 +71,7 @@
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -1936,6 +1937,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 	 * just go back and repeat.
 	 */
 	rq = task_rq_lock(p, &flags);
+	trace_sched_wait_task(rq, p);
 	running = task_running(rq, p);
 	on_rq = p->se.on_rq;
 	ncsw = 0;
@@ -2297,9 +2299,7 @@ out_activate:
 	success = 1;
 
 out_running:
-	trace_mark(kernel_sched_wakeup,
-		"pid %d state %ld ## rq %p task %p rq->curr %p",
-		p->pid, p->state, rq, p, rq->curr);
+	trace_sched_wakeup(rq, p);
 	check_preempt_curr(rq, p, sync);
 
 	p->state = TASK_RUNNING;
@@ -2432,9 +2432,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_new(rq, p);
 		inc_nr_running(rq);
 	}
-	trace_mark(kernel_sched_wakeup_new,
-		"pid %d state %ld ## rq %p task %p rq->curr %p",
-		p->pid, p->state, rq, p, rq->curr);
+	trace_sched_wakeup_new(rq, p);
 	check_preempt_curr(rq, p, 0);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
@@ -2607,11 +2605,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_mark(kernel_sched_schedule,
-		"prev_pid %d next_pid %d prev_state %ld "
-		"## rq %p prev %p next %p",
-		prev->pid, next->pid, prev->state,
-		rq, prev, next);
+	trace_sched_switch(rq, prev, next);
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -2851,6 +2845,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 	    || unlikely(!cpu_active(dest_cpu)))
 		goto out;
 
+	trace_sched_migrate_task(rq, p, dest_cpu);
 	/* force the process onto the specified CPU */
 	if (migrate_task(p, dest_cpu, &req)) {
 		/* Need to wait for migration thread (might exit: take ref). */
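
For context on what the new trace_sched_*() calls resolve to: the hunks above
replace ad-hoc trace_mark() markers, whose format strings had to be parsed at
runtime, with static tracepoints that pass typed C arguments directly. The
declarations live in the new include/trace/sched.h header, which is outside
this diff; the sketch below shows what it presumably contains, assuming the
DEFINE_TRACE()/TPPROTO()/TPARGS() macros from the tracepoint infrastructure of
the same kernel generation, so treat it as an illustration rather than the
patch's literal text.

	#ifndef _TRACE_SCHED_H
	#define _TRACE_SCHED_H

	#include <linux/sched.h>
	#include <linux/tracepoint.h>

	/* Fired from wait_task_inactive() once the task's runqueue is locked. */
	DEFINE_TRACE(sched_wait_task,
		TPPROTO(struct rq *rq, struct task_struct *p),
			TPARGS(rq, p));

	/* Fired from try_to_wake_up() when a sleeping task becomes runnable. */
	DEFINE_TRACE(sched_wakeup,
		TPPROTO(struct rq *rq, struct task_struct *p),
			TPARGS(rq, p));

	/* Fired from wake_up_new_task() for a freshly forked task. */
	DEFINE_TRACE(sched_wakeup_new,
		TPPROTO(struct rq *rq, struct task_struct *p),
			TPARGS(rq, p));

	/* Fired from context_switch() just before the actual switch. */
	DEFINE_TRACE(sched_switch,
		TPPROTO(struct rq *rq, struct task_struct *prev,
			struct task_struct *next),
			TPARGS(rq, prev, next));

	/* Fired from sched_migrate_task() before pushing p to dest_cpu. */
	DEFINE_TRACE(sched_migrate_task,
		TPPROTO(struct rq *rq, struct task_struct *p, int dest_cpu),
			TPARGS(rq, p, dest_cpu));

	#endif /* _TRACE_SCHED_H */

When no probe is registered, each trace_sched_*() call reduces to a test of a
probe pointer behind an unlikely() branch, so the instrumented scheduler fast
paths pay almost nothing. A tracer attaches through the register function that
DEFINE_TRACE() generates and receives the raw rq/task pointers instead of a
pre-formatted string; a hypothetical probe for the sched_switch tracepoint
might look like:

	static void probe_sched_switch(struct rq *rq, struct task_struct *prev,
				       struct task_struct *next)
	{
		/* record prev->pid, next->pid, prev->state, ... */
	}

	/* register_trace_sched_switch() is generated by DEFINE_TRACE() */
	register_trace_sched_switch(probe_sched_switch);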