@@ -1785,7 +1785,6 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 	update_rq_clock(rq);
 	sched_info_queued(p);
 	p->sched_class->enqueue_task(rq, p, flags);
-	p->se.on_rq = 1;
 }
 
 static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1793,7 +1792,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	update_rq_clock(rq);
 	sched_info_dequeued(p);
 	p->sched_class->dequeue_task(rq, p, flags);
-	p->se.on_rq = 0;
 }
 
 /*
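
The two hunks above move the flag maintenance out of enqueue_task()/dequeue_task(): the queue operations now only drive the sched_class hooks, and the task-level p->on_rq is updated by their callers (the wake-up, fork and schedule paths changed further down). A minimal user-space sketch of the resulting division of labour; the stub types and helper names here are invented for illustration, not the kernel's:

#include <assert.h>

/* Stand-ins for the kernel structures, trimmed to the fields this
 * sketch needs; not the real definitions. */
struct sched_entity { int on_rq; };		/* per-entity (CFS) flag */
struct task_struct  { int on_rq; struct sched_entity se; };
struct rq           { int nr_running; };

/* After the patch: the queue operation itself no longer touches the flag. */
static void enqueue_task(struct rq *rq, struct task_struct *p)
{
	rq->nr_running++;	/* p->se.on_rq = 1 is gone from here */
}

static void dequeue_task(struct rq *rq, struct task_struct *p)
{
	rq->nr_running--;	/* p->se.on_rq = 0 is gone from here */
}

/* ...so each caller pairs the queue operation with the task-level flag,
 * mirroring what ttwu_activate() and the schedule() sleep path do below. */
static void activate(struct rq *rq, struct task_struct *p)
{
	enqueue_task(rq, p);
	p->on_rq = 1;
}

static void deactivate(struct rq *rq, struct task_struct *p)
{
	dequeue_task(rq, p);
	p->on_rq = 0;
}

int main(void)
{
	struct rq rq = { 0 };
	struct task_struct p = { 0, { 0 } };

	activate(&rq, &p);
	assert(p.on_rq == 1 && rq.nr_running == 1);
	deactivate(&rq, &p);
	assert(p.on_rq == 0 && rq.nr_running == 0);
	return 0;
}
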
@@ -2128,7 +2126,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
+	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
 
@@ -2203,7 +2201,7 @@ static bool migrate_task(struct task_struct *p, struct rq *rq)
 	 * If the task is not on a runqueue (and not running), then
 	 * the next wake-up will properly place the task.
 	 */
-	return p->se.on_rq || task_running(rq, p);
+	return p->on_rq || task_running(rq, p);
 }
 
 /*
@@ -2263,7 +2261,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		rq = task_rq_lock(p, &flags);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
-		on_rq = p->se.on_rq;
+		on_rq = p->on_rq;
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -2444,6 +2442,7 @@ ttwu_stat(struct rq *rq, struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
 	activate_task(rq, p, en_flags);
+	p->on_rq = 1;
 
 	/* if a worker is waking up, notify workqueue */
 	if (p->flags & PF_WQ_WORKER)
@@ -2506,7 +2505,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 
 	cpu = task_cpu(p);
 
-	if (p->se.on_rq)
+	if (p->on_rq)
 		goto out_running;
 
 	orig_cpu = cpu;
@@ -2583,7 +2582,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!(p->state & TASK_NORMAL))
 		return;
 
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
 	ttwu_post_activation(p, rq, 0);
@@ -2620,19 +2619,21 @@ int wake_up_state(struct task_struct *p, unsigned int state)
  */
 static void __sched_fork(struct task_struct *p)
 {
+	p->on_rq = 0;
+
+	p->se.on_rq = 0;
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
 	p->se.nr_migrations = 0;
 	p->se.vruntime = 0;
+	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
 	INIT_LIST_HEAD(&p->rt.run_list);
-	p->se.on_rq = 0;
-	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	INIT_HLIST_HEAD(&p->preempt_notifiers);
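
__sched_fork() now clears both flags explicitly and side by side: p->on_rq is the scheduler core's notion of "this task is queued on a runqueue", while p->se.on_rq stays with the per-entity bookkeeping of the fair class (under group scheduling each sched_entity in the hierarchy carries its own on_rq). A tiny sketch of the fork-time state, with the same trimmed stub types:

#include <assert.h>

struct sched_entity { int on_rq; };
struct task_struct  { int on_rq; struct sched_entity se; };

/* Rough shape of the reordered __sched_fork(): the core flag first,
 * then the per-entity state; everything else elided. */
static void sched_fork_sketch(struct task_struct *p)
{
	p->on_rq = 0;		/* core: not queued on any runqueue */
	p->se.on_rq = 0;	/* entity: not enqueued on a cfs_rq */
}

int main(void)
{
	struct task_struct p = { 1, { 1 } };	/* garbage before init */

	sched_fork_sketch(&p);
	assert(p.on_rq == 0 && p.se.on_rq == 0);
	return 0;
}
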
@@ -2750,6 +2751,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 
 	rq = task_rq_lock(p, &flags);
 	activate_task(rq, p, 0);
+	p->on_rq = 1;
 	trace_sched_wakeup_new(p, true);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -4051,7 +4053,7 @@ static inline void schedule_debug(struct task_struct *prev)
 
 static void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
-	if (prev->se.on_rq)
+	if (prev->on_rq)
 		update_rq_clock(rq);
 	prev->sched_class->put_prev_task(rq, prev);
 }
@@ -4126,7 +4128,9 @@ need_resched:
 				if (to_wakeup)
 					try_to_wake_up_local(to_wakeup);
 			}
+
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+			prev->on_rq = 0;
 
 			/*
 			 * If we are going to sleep and we have plugged IO queued, make
@@ -4695,7 +4699,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
 		dequeue_task(rq, p, 0);
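
This hunk, and the set_user_nice(), sched_setscheduler(), normalize_task() and sched_move_task() hunks below, are the read side of the conversion: sample the queued state once, take the task off the runqueue while its parameters change, then requeue it. The recurring pattern, condensed into one hypothetical helper (stubs as before; change_prio() is not a real kernel function):

#include <assert.h>

struct sched_entity { int on_rq; };
struct task_struct  { int on_rq; int prio; struct sched_entity se; };
struct rq           { int nr_running; };

/* As in the first sketch: the queue ops no longer touch the flag. */
static void dequeue_task(struct rq *rq, struct task_struct *p)
{
	rq->nr_running--;
}

static void enqueue_task(struct rq *rq, struct task_struct *p)
{
	rq->nr_running++;
}

/* Hypothetical condensation of the dequeue/modify/enqueue pattern shared
 * by rt_mutex_setprio(), set_user_nice() and friends.  Note that p->on_rq
 * stays set across the window: these paths run under task_rq_lock(), so
 * nobody can observe the intermediate state. */
static void change_prio(struct rq *rq, struct task_struct *p, int prio)
{
	int on_rq = p->on_rq;	/* was: p->se.on_rq */

	if (on_rq)
		dequeue_task(rq, p);
	p->prio = prio;		/* stand-in for the real parameter update */
	if (on_rq)
		enqueue_task(rq, p);
}

int main(void)
{
	struct rq rq = { 1 };
	struct task_struct p = { 1, 120, { 1 } };	/* currently queued */

	change_prio(&rq, &p, 100);
	assert(p.prio == 100 && p.on_rq == 1 && rq.nr_running == 1);
	return 0;
}
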
@@ -4743,7 +4747,7 @@ void set_user_nice(struct task_struct *p, long nice)
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	if (on_rq)
 		dequeue_task(rq, p, 0);
 
@@ -4877,8 +4881,6 @@ static struct task_struct *find_process_by_pid(pid_t pid)
 static void
 __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
 {
-	BUG_ON(p->se.on_rq);
-
 	p->policy = policy;
 	p->rt_priority = prio;
 	p->normal_prio = normal_prio(p);
@@ -5044,7 +5046,7 @@ recheck:
 		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 		goto recheck;
 	}
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	running = task_current(rq, p);
 	if (on_rq)
 		deactivate_task(rq, p, 0);
@@ -5965,7 +5967,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (p->se.on_rq) {
+	if (p->on_rq) {
 		deactivate_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
 		activate_task(rq_dest, p, 0);
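
Migration tests the same flag: only a task that is actually queued is moved between runqueues; an unqueued task is left alone because, as the comment says, its next wake-up will place it properly. A sketch of that conditional move; the two-runqueue setup and the migrate_sketch() name are illustrative only:

#include <assert.h>

struct sched_entity { int on_rq; };
struct task_struct  { int on_rq; int cpu; struct sched_entity se; };
struct rq           { int cpu; int nr_running; };

static void deactivate_task(struct rq *rq, struct task_struct *p)
{
	rq->nr_running--;
}

static void activate_task(struct rq *rq, struct task_struct *p)
{
	rq->nr_running++;
}

/* Sketch of the __migrate_task() core: move p only if it is queued;
 * otherwise the next wake-up will place it on a suitable runqueue. */
static void migrate_sketch(struct task_struct *p,
			   struct rq *rq_src, struct rq *rq_dest)
{
	if (p->on_rq) {			/* was: p->se.on_rq */
		deactivate_task(rq_src, p);
		p->cpu = rq_dest->cpu;	/* stand-in for set_task_cpu() */
		activate_task(rq_dest, p);
	}
}

int main(void)
{
	struct rq rq0 = { 0, 1 }, rq1 = { 1, 0 };
	struct task_struct p = { 1, 0, { 1 } };	/* queued on CPU0 */

	migrate_sketch(&p, &rq0, &rq1);
	assert(p.cpu == 1 && rq0.nr_running == 0 && rq1.nr_running == 1);
	return 0;
}
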
@@ -8339,7 +8341,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 	int old_prio = p->prio;
 	int on_rq;
 
-	on_rq = p->se.on_rq;
+	on_rq = p->on_rq;
 	if (on_rq)
 		deactivate_task(rq, p, 0);
 	__setscheduler(rq, p, SCHED_NORMAL, 0);
@@ -8682,7 +8684,7 @@ void sched_move_task(struct task_struct *tsk)
 	rq = task_rq_lock(tsk, &flags);
 
 	running = task_current(rq, tsk);
-	on_rq = tsk->se.on_rq;
+	on_rq = tsk->on_rq;
 
 	if (on_rq)
 		dequeue_task(rq, tsk, 0);