@@ -1152,6 +1152,18 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
 #endif
 }
 
+static inline void check_class_changed(struct rq *rq, struct task_struct *p,
+				       const struct sched_class *prev_class,
+				       int oldprio, int running)
+{
+	if (prev_class != p->sched_class) {
+		if (prev_class->switched_from)
+			prev_class->switched_from(rq, p, running);
+		p->sched_class->switched_to(rq, p, running);
+	} else
+		p->sched_class->prio_changed(rq, p, oldprio, running);
+}
+
 #ifdef CONFIG_SMP
 
 /*
@@ -4017,6 +4029,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	unsigned long flags;
 	int oldprio, on_rq, running;
 	struct rq *rq;
+	const struct sched_class *prev_class = p->sched_class;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
@@ -4042,18 +4055,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	if (on_rq) {
 		if (running)
 			p->sched_class->set_curr_task(rq);
+
 		enqueue_task(rq, p, 0);
-		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if we are not currently running on
-		 * this runqueue and our priority is higher than the current's
-		 */
-		if (running) {
-			if (p->prio > oldprio)
-				resched_task(rq->curr);
-		} else {
-			check_preempt_curr(rq, p);
-		}
+
+		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
 	task_rq_unlock(rq, &flags);
 }
@@ -4253,6 +4258,7 @@ int sched_setscheduler(struct task_struct *p, int policy,
 {
 	int retval, oldprio, oldpolicy = -1, on_rq, running;
 	unsigned long flags;
+	const struct sched_class *prev_class = p->sched_class;
 	struct rq *rq;
 
 	/* may grab non-irq protected spin_locks */
@@ -4346,18 +4352,10 @@ recheck:
 	if (on_rq) {
 		if (running)
 			p->sched_class->set_curr_task(rq);
+
 		activate_task(rq, p, 0);
-		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if we are not currently running on
-		 * this runqueue and our priority is higher than the current's
-		 */
-		if (running) {
-			if (p->prio > oldprio)
-				resched_task(rq->curr);
-		} else {
-			check_preempt_curr(rq, p);
-		}
+
+		check_class_changed(rq, p, prev_class, oldprio, running);
 	}
 	__task_rq_unlock(rq);
 	spin_unlock_irqrestore(&p->pi_lock, flags);