@@ -3896,10 +3896,14 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
 	int tsk_cache_hot = 0;
 	/*
 	 * We do not migrate tasks that are:
-	 * 1) running (obviously), or
+	 * 1) throttled_lb_pair, or
 	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
-	 * 3) are cache-hot on their current CPU.
+	 * 3) running (obviously), or
+	 * 4) are cache-hot on their current CPU.
 	 */
+	if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
+		return 0;
+
 	if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
 		int new_dst_cpu;

@@ -3967,9 +3971,6 @@ static int move_one_task(struct lb_env *env)
 	struct task_struct *p, *n;

 	list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
-		if (throttled_lb_pair(task_group(p), env->src_rq->cpu, env->dst_cpu))
-			continue;
-
 		if (!can_migrate_task(p, env))
 			continue;

@@ -4021,7 +4022,7 @@ static int move_tasks(struct lb_env *env)
 			break;
 		}

-		if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
+		if (!can_migrate_task(p, env))
 			goto next;

 		load = task_h_load(p);
@@ -4032,9 +4033,6 @@ static int move_tasks(struct lb_env *env)
 		if ((load / 2) > env->imbalance)
 			goto next;

-		if (!can_migrate_task(p, env))
-			goto next;
-
 		move_task(p, env);
 		pulled++;
 		env->imbalance -= load;
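
For context: the patch folds the throttled_lb_pair() test into can_migrate_task(), so move_one_task() no longer needs its own open-coded copy, and move_tasks() now rejects unmigratable tasks before spending cycles on task_h_load() for them. Below is a sketch (not part of the patch) of the helper being hoisted, as it reads in fair.c of this kernel era when CONFIG_CFS_BANDWIDTH is enabled; the !CONFIG_CFS_BANDWIDTH stub simply returns 0:

/*
 * Sketch: refuse a load-balance migration between src_cpu and dest_cpu
 * when the task group's cfs_rq is bandwidth-throttled on either CPU.
 * Assumes the per-CPU tg->cfs_rq[] array and throttled_hierarchy()
 * helper of this era's fair.c.
 */
static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;

	src_cfs_rq = tg->cfs_rq[src_cpu];
	dest_cfs_rq = tg->cfs_rq[dest_cpu];

	return throttled_hierarchy(src_cfs_rq) ||
	       throttled_hierarchy(dest_cfs_rq);
}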