@@ -160,11 +160,23 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 	struct rq *rq = task_rq(p);
 
 	/*
-	 * If the task will not preempt the RQ, try to find a better RQ
-	 * before we even activate the task
+	 * If the current task is an RT task, then
+	 * try to see if we can wake this RT task up on another
+	 * runqueue. Otherwise simply start this RT task
+	 * on its current runqueue.
+	 *
+	 * We want to avoid overloading runqueues, even if
+	 * the RT task is of higher priority than the current RT task.
+	 * RT tasks behave differently from other tasks: if
+	 * one gets preempted, we try to push it off to another queue.
+	 * So trying to keep a preempting RT task on the same
+	 * cache-hot CPU will force the running RT task onto
+	 * a cold CPU. We would waste all the cache of the lower-priority
+	 * RT task in hopes of saving some for the RT task
+	 * that is just being woken, which will probably have a
+	 * cold cache anyway.
 	 */
-	if ((p->prio >= rq->rt.highest_prio)
-	    && (p->nr_cpus_allowed > 1)) {
+	if (unlikely(rt_task(rq->curr))) {
 		int cpu = find_lowest_rq(p);
 
 		return (cpu == -1) ? task_cpu(p) : cpu;
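
Illustrative sketch (not part of the patch): the new test keys off
rt_task(rq->curr), i.e. whether the waking task's runqueue is currently
running a task in the realtime priority range (prio < MAX_RT_PRIO, 100
in the kernel). The fragment below restates the resulting placement
decision with hypothetical stand-in types; struct task, struct runqueue
and pick_wake_cpu are illustrative names, not kernel APIs.

	#include <stdbool.h>

	/* Hypothetical stand-ins for the kernel structures. */
	struct task { int prio; };		/* lower value = higher priority */
	struct runqueue { struct task *curr; };	/* task running on this CPU */

	#define MAX_RT_PRIO 100			/* prios 0..99 are realtime */

	static bool rt_task(const struct task *t)
	{
		return t->prio < MAX_RT_PRIO;
	}

	/*
	 * Placement as patched: if the target CPU already runs an RT
	 * task, take the "lowest" runqueue found by the balancer.
	 * lowest_cpu is -1 when none was found, mirroring the failure
	 * return of find_lowest_rq(); in that case stay on task_cpu.
	 */
	static int pick_wake_cpu(const struct runqueue *rq,
				 int task_cpu, int lowest_cpu)
	{
		if (rt_task(rq->curr))
			return (lowest_cpu == -1) ? task_cpu : lowest_cpu;

		return task_cpu;
	}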