@@ -977,13 +977,23 @@ static void yield_task_rt(struct rq *rq)
 static int find_lowest_rq(struct task_struct *task);
 
 static int
-select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
+	struct task_struct *curr;
+	struct rq *rq;
+	int cpu;
+
 	if (sd_flag != SD_BALANCE_WAKE)
 		return smp_processor_id();
 
+	cpu = task_cpu(p);
+	rq = cpu_rq(cpu);
+
+	rcu_read_lock();
+	curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+
 	/*
-	 * If the current task is an RT task, then
+	 * If the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -997,21 +1007,25 @@ select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
 	 * lock?
 	 *
 	 * For equal prio tasks, we just let the scheduler sort it out.
+	 *
+	 * Otherwise, just let it ride on the affined RQ and the
+	 * post-schedule router will push the preempted task away
+	 *
+	 * This test is optimistic, if we get it wrong the load-balancer
+	 * will have to sort it out.
 	 */
-	if (unlikely(rt_task(rq->curr)) &&
-	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio) &&
+	if (curr && unlikely(rt_task(curr)) &&
+	    (curr->rt.nr_cpus_allowed < 2 ||
+	     curr->prio < p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
-		int cpu = find_lowest_rq(p);
+		int target = find_lowest_rq(p);
 
-		return (cpu == -1) ? task_cpu(p) : cpu;
+		if (target != -1)
+			cpu = target;
 	}
+	rcu_read_unlock();
 
-	/*
-	 * Otherwise, just let it ride on the affined RQ and the
-	 * post-schedule router will push the preempted task away
-	 */
-	return task_cpu(p);
+	return cpu;
 }
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
@@ -1136,7 +1150,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1287,7 +1301,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
-				     !task->se.on_rq)) {
+				     !task->on_rq)) {
 
 				raw_spin_unlock(&lowest_rq->lock);
 				lowest_rq = NULL;
@@ -1321,7 +1335,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->rt.nr_cpus_allowed <= 1);
 
-	BUG_ON(!p->se.on_rq);
+	BUG_ON(!p->on_rq);
 	BUG_ON(!rt_task(p));
 
 	return p;
@@ -1467,7 +1481,7 @@ static int pull_rt_task(struct rq *this_rq)
 		 */
 		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->se.on_rq);
+			WARN_ON(!p->on_rq);
 
 			/*
 			 * There's a chance that p is higher in priority
@@ -1538,7 +1552,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	 * Update the migration status of the RQ if we have an RT task
 	 * which is running AND changing its weight value.
 	 */
-	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
+	if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
 		struct rq *rq = task_rq(p);
 
 		if (!task_current(rq, p)) {
@@ -1608,7 +1622,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (p->se.on_rq && !rq->rt.rt_nr_running)
+	if (p->on_rq && !rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
 
@@ -1638,7 +1652,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (p->se.on_rq && rq->curr != p) {
+	if (p->on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->rt.overloaded && push_rt_task(rq) &&
 		    /* Don't resched if we changed runqueues */
@@ -1657,7 +1671,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
 static void
 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (!p->se.on_rq)
+	if (!p->on_rq)
 		return;
 
 	if (rq->curr == p) {
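
For readers following the select_task_rq_rt() rework in the first two hunks, here is a small, self-contained userspace sketch of the CPU-selection decision the patched function makes. It is illustrative only: struct model_task, find_lowest_cpu() and select_cpu_rt() are invented stand-ins for the kernel's task_struct, find_lowest_rq() and select_task_rq_rt(), and the sketch omits the rcu_read_lock()/ACCESS_ONCE(rq->curr) protection the real code needs because rq->curr is read without holding the runqueue lock.

/*
 * Hypothetical userspace model of the wake-up CPU selection logic in the
 * patched select_task_rq_rt().  Names and fields below are simplified
 * stand-ins, not kernel APIs.
 */
#include <stdio.h>

struct model_task {
	int prio;		/* lower value means higher priority */
	int is_rt;		/* non-zero for a realtime task */
	int nr_cpus_allowed;	/* size of the task's affinity mask */
	int cpu;		/* CPU the task last ran on */
};

/* Stand-in for find_lowest_rq(): a lower-priority CPU, or -1 if none. */
static int find_lowest_cpu(void)
{
	return 2;
}

/*
 * Mirrors the patched decision: keep the waking task on its old CPU
 * unless that CPU's current task is RT and either pinned (fewer than
 * two allowed CPUs) or higher priority than the waking task, and the
 * waking task itself is allowed to run elsewhere.
 */
static int select_cpu_rt(const struct model_task *p, const struct model_task *curr)
{
	int cpu = p->cpu;

	if (curr && curr->is_rt &&
	    (curr->nr_cpus_allowed < 2 || curr->prio < p->prio) &&
	    p->nr_cpus_allowed > 1) {
		int target = find_lowest_cpu();

		if (target != -1)
			cpu = target;
	}
	return cpu;
}

int main(void)
{
	struct model_task waking = { .prio = 90, .is_rt = 1, .nr_cpus_allowed = 4, .cpu = 0 };
	struct model_task curr   = { .prio = 10, .is_rt = 1, .nr_cpus_allowed = 1, .cpu = 0 };

	/* curr is RT, pinned and higher priority, so the waking task moves. */
	printf("selected CPU: %d\n", select_cpu_rt(&waking, &curr));
	return 0;
}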