@@ -2896,7 +2896,7 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      enum cpu_idle_type idle, int *all_pinned,
 	      int *this_best_prio, struct rq_iterator *iterator)
 {
-	int loops = 0, pulled = 0, pinned = 0, skip_for_load;
+	int loops = 0, pulled = 0, pinned = 0;
 	struct task_struct *p;
 	long rem_load_move = max_load_move;
 
@@ -2912,14 +2912,8 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 next:
 	if (!p || loops++ > sysctl_sched_nr_migrate)
 		goto out;
-	/*
-	 * To help distribute high priority tasks across CPUs we don't
-	 * skip a task if it will be the highest priority task (i.e. smallest
-	 * prio value) on its new queue regardless of its load weight
-	 */
-	skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
-							 SCHED_LOAD_SCALE_FUZZ;
-	if ((skip_for_load && p->prio >= *this_best_prio) ||
+
+	if ((p->se.load.weight >> 1) > rem_load_move ||
 	    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
 		p = iterator->next(iterator->arg);
 		goto next;
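
For reference, after this change the skip decision reduces to a pure load-budget check: a task is passed over once half its load weight exceeds the load still to be migrated, with no SCHED_LOAD_SCALE_FUZZ slack and no priority-based exception. A minimal standalone sketch of the predicate follows; struct task_stub and skip_for_load() are simplified stand-ins for illustration, not the kernel's real types or helpers.

#include <stdbool.h>

/* Simplified stand-in for the scheduler entity's load weight. */
struct task_stub {
	unsigned long load_weight;	/* corresponds to p->se.load.weight */
};

/*
 * After the patch, balance_tasks() skips a task on load alone: if
 * half the task's weight already exceeds the remaining migration
 * budget, the task is too heavy to pull.  Before the patch, a FUZZ
 * margin was added to the budget and a high-priority task (one that
 * would become the best prio on the destination queue) was pulled
 * regardless of its weight.
 */
static bool skip_for_load(const struct task_stub *p, long rem_load_move)
{
	return (long)(p->load_weight >> 1) > rem_load_move;
}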