@@ -1218,6 +1218,18 @@ static int pull_rt_task(struct rq *this_rq)
 			continue;
 
 		src_rq = cpu_rq(cpu);
+
+		/*
+		 * Don't bother taking the src_rq->lock if the next highest
+		 * task is known to be lower-priority than our current task.
+		 * This may look racy, but if this value is about to go
+		 * logically higher, the src_rq will push this task away.
+		 * And if its going logically lower, we do not care
+		 */
+		if (src_rq->rt.highest_prio.next >=
+		    this_rq->rt.highest_prio.curr)
+			continue;
+
 		/*
 		 * We can potentially drop this_rq's lock in
 		 * double_lock_balance, and another CPU could