@@ -3905,7 +3905,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 		return 0;
 
 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
-		int new_dst_cpu;
+		int cpu;
 
 		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
 
@@ -3920,12 +3920,15 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 		if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
 			return 0;
 
-		new_dst_cpu = cpumask_first_and(env->dst_grpmask,
-						tsk_cpus_allowed(p));
-		if (new_dst_cpu < nr_cpu_ids) {
-			env->flags |= LBF_SOME_PINNED;
-			env->new_dst_cpu = new_dst_cpu;
+		/* Prevent to re-select dst_cpu via env's cpus */
+		for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
+			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
+				env->flags |= LBF_SOME_PINNED;
+				env->new_dst_cpu = cpu;
+				break;
+			}
 		}
+
 		return 0;
 	}
 
@@ -5008,7 +5011,6 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 			int *balance)
 {
 	int ld_moved, cur_ld_moved, active_balance = 0;
-	int lb_iterations, max_lb_iterations;
 	struct sched_group *group;
 	struct rq *busiest;
 	unsigned long flags;
@@ -5028,15 +5030,8 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	 * For NEWLY_IDLE load_balancing, we don't need to consider
 	 * other cpus in our group
 	 */
-	if (idle == CPU_NEWLY_IDLE) {
+	if (idle == CPU_NEWLY_IDLE)
 		env.dst_grpmask = NULL;
-		/*
-		 * we don't care max_lb_iterations in this case,
-		 * in following patch, this will be removed
-		 */
-		max_lb_iterations = 0;
-	} else
-		max_lb_iterations = cpumask_weight(env.dst_grpmask);
 
 	cpumask_copy(cpus, cpu_active_mask);
 
@@ -5064,7 +5059,6 @@ redo:
 	schedstat_add(sd, lb_imbalance[idle], env.imbalance);
 
 	ld_moved = 0;
-	lb_iterations = 1;
 	if (busiest->nr_running > 1) {
 		/*
 		 * Attempt to move tasks. If find_busiest_group has found
@@ -5121,14 +5115,17 @@ more_balance:
 		 * moreover subsequent load balance cycles should correct the
 		 * excess load moved.
 		 */
-		if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 &&
-				lb_iterations++ < max_lb_iterations) {
+		if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
 
 			env.dst_rq = cpu_rq(env.new_dst_cpu);
 			env.dst_cpu = env.new_dst_cpu;
 			env.flags &= ~LBF_SOME_PINNED;
 			env.loop = 0;
 			env.loop_break = sched_nr_migrate_break;
+
+			/* Prevent to re-select dst_cpu via env's cpus */
+			cpumask_clear_cpu(env.dst_cpu, env.cpus);
+
 			/*
 			 * Go back to "more_balance" rather than "redo" since we
 			 * need to continue with same src_cpu.
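
In short, the patch replaces the explicit iteration counter (lb_iterations/max_lb_iterations) with a cpumask-based scheme: when every candidate task is pinned away from dst_cpu, can_migrate_task() records an alternative destination chosen from env->dst_grpmask restricted to env->cpus, and load_balance() clears each tried dst_cpu out of env.cpus before retrying, so the same destination can never be selected twice. The sketch below models that mechanism in userspace with plain bitmasks; it is illustrative only, not kernel code, and the names (pick_new_dst_cpu, env_cpus, and the mask values) are invented for the example.

/*
 * Illustrative userspace model of the mechanism above -- NOT kernel
 * code. A "cpumask" is a plain unsigned bitmask here: bit i set
 * means cpu i is a member.
 */
#include <stdio.h>

#define NR_CPUS 8

/*
 * First cpu that is in the destination group, still usable for this
 * balance pass, and allowed by the task's affinity; -1 if none.
 * Mirrors the new for_each_cpu_and() loop in can_migrate_task().
 */
static int pick_new_dst_cpu(unsigned int dst_grpmask,
                            unsigned int env_cpus,
                            unsigned int allowed)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (dst_grpmask & env_cpus & allowed & (1u << cpu))
                        return cpu;
        return -1;
}

int main(void)
{
        unsigned int dst_grpmask = 0x0f; /* cpus 0-3 form the dst group */
        unsigned int env_cpus    = 0x0f; /* cpus not yet tried */
        unsigned int allowed     = 0x06; /* task pinned to cpus 1 and 2 */
        int dst_cpu;

        /*
         * Each failed attempt removes the tried dst_cpu from env_cpus,
         * as cpumask_clear_cpu(env.dst_cpu, env.cpus) does in the
         * patch, so the retry loop terminates after at most one
         * attempt per cpu in the destination group.
         */
        while ((dst_cpu = pick_new_dst_cpu(dst_grpmask, env_cpus, allowed)) >= 0) {
                printf("trying dst_cpu %d\n", dst_cpu);
                env_cpus &= ~(1u << dst_cpu); /* don't pick it again */
        }
        printf("no candidates left, giving up\n");
        return 0;
}

The forward-progress guarantee now comes from env.cpus shrinking monotonically rather than from a separate counter, which also keeps the CPU_NEWLY_IDLE case (where env.dst_grpmask is NULL and no retry destination is ever recorded) trivially correct.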