@@ -2057,14 +2057,14 @@ inline int task_curr(const struct task_struct *p)
 
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
-				       int oldprio, int running)
+				       int oldprio)
 {
 	if (prev_class != p->sched_class) {
 		if (prev_class->switched_from)
-			prev_class->switched_from(rq, p, running);
-		p->sched_class->switched_to(rq, p, running);
-	} else
-		p->sched_class->prio_changed(rq, p, oldprio, running);
+			prev_class->switched_from(rq, p);
+		p->sched_class->switched_to(rq, p);
+	} else if (oldprio != p->prio)
+		p->sched_class->prio_changed(rq, p, oldprio);
 }
 
 static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
@@ -2598,6 +2598,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
+	p->se.vruntime			= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -4696,11 +4697,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, &flags);
 }
 
@@ -5028,11 +5028,10 @@ recheck:
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		activate_task(rq, p, 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	__task_rq_unlock(rq);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
@@ -8237,6 +8236,8 @@ EXPORT_SYMBOL(__might_sleep);
 #ifdef CONFIG_MAGIC_SYSRQ
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
+	const struct sched_class *prev_class = p->sched_class;
+	int old_prio = p->prio;
 	int on_rq;
 
 	on_rq = p->se.on_rq;
@@ -8247,6 +8248,8 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		activate_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
+
+	check_class_changed(rq, p, prev_class, old_prio);
 }
 
 void normalize_rt_tasks(void)
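
Note: the hunks above only change the call sites; the corresponding struct
sched_class callback declarations must drop their 'running' parameter in the
same change. A minimal sketch of the signatures implied by the new call sites,
assuming the sched_class layout of this era (illustrative, not a hunk from
this patch):

	/*
	 * Implied callback signatures after this change: 'running' is no
	 * longer passed in. A class implementation that still needs it can
	 * recompute it on the rq side, e.g. via task_current(rq, p).
	 */
	void (*switched_from)(struct rq *this_rq, struct task_struct *task);
	void (*switched_to)(struct rq *this_rq, struct task_struct *task);
	void (*prio_changed)(struct rq *this_rq, struct task_struct *task,
			     int oldprio);

Two behavioural points follow from the diff itself: callers now invoke
check_class_changed() unconditionally rather than only when the task was
on a runqueue, and the new "else if (oldprio != p->prio)" guard means
->prio_changed() is skipped entirely when the priority did not actually
change.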