@@ -151,8 +151,27 @@ yield_task_rt(struct rq *rq)
 }
 
 #ifdef CONFIG_SMP
+static int find_lowest_rq(struct task_struct *task);
+
 static int select_task_rq_rt(struct task_struct *p, int sync)
 {
+	struct rq *rq = task_rq(p);
+
+	/*
+	 * If the task will not preempt the RQ, try to find a better RQ
+	 * before we even activate the task
+	 */
+	if ((p->prio >= rq->rt.highest_prio)
+	    && (p->nr_cpus_allowed > 1)) {
+		int cpu = find_lowest_rq(p);
+
+		return (cpu == -1) ? task_cpu(p) : cpu;
+	}
+
+	/*
+	 * Otherwise, just let it ride on the affined RQ and the
+	 * post-schedule router will push the preempted task away
+	 */
 	return task_cpu(p);
 }
 #endif /* CONFIG_SMP */
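
For reference, the decision this hunk adds can be modeled in plain user-space C. The sketch below is illustrative only: the NR_CPUS constant, the highest_prio[] array, and this simplified find_lowest_rq() are stand-ins for the real runqueue structures and for the actual find_lowest_rq() search, whose body is not shown in this hunk (only its forward declaration is). As in the kernel, a lower numeric value means a higher RT priority.

/*
 * Minimal model of the wakeup pre-route decision above.
 * Assumptions (not from the patch): NR_CPUS, highest_prio[],
 * and this simplified find_lowest_rq() are hypothetical
 * stand-ins for the real runqueue data and search logic.
 */
#include <stdio.h>

#define NR_CPUS 4
#define PRIO_NONE 101	/* "idle": worse than any RT prio 0..99 */

/* Highest (numerically lowest) RT priority queued on each CPU. */
static int highest_prio[NR_CPUS] = { 10, 50, PRIO_NONE, 30 };

/*
 * Pick a CPU whose runqueue is running lower-priority work than
 * the waking task, i.e. one the task would preempt; -1 if none.
 */
static int find_lowest_rq(int task_prio, int cur_cpu)
{
	int cpu, best = -1, best_prio = task_prio;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu == cur_cpu)
			continue;
		if (highest_prio[cpu] > best_prio) {
			best_prio = highest_prio[cpu];
			best = cpu;
		}
	}
	return best;
}

/*
 * Mirrors select_task_rq_rt(): stay put if the task would preempt
 * its local RQ anyway; otherwise try to route it to a lower-prio
 * RQ before it is ever activated here.
 */
static int select_rq(int task_prio, int cur_cpu, int nr_cpus_allowed)
{
	if (task_prio >= highest_prio[cur_cpu] && nr_cpus_allowed > 1) {
		int cpu = find_lowest_rq(task_prio, cur_cpu);

		return (cpu == -1) ? cur_cpu : cpu;
	}
	return cur_cpu;
}

int main(void)
{
	/*
	 * A prio-40 task wakes on CPU 0, which is running prio-10
	 * work: it cannot preempt, so it is routed to CPU 2 (idle).
	 */
	printf("routed to CPU %d\n", select_rq(40, 0, NR_CPUS));
	return 0;
}

The design point, per the second comment in the hunk, is to make the cheap routing decision before activation: if the task were simply enqueued on a busy affined RQ, the post-schedule push logic would only have to migrate it again afterwards.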