@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 		(busiest->load_per_task * SCHED_POWER_SCALE) /
 		busiest->group_power;
 
-	if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
-			(scaled_busy_load_per_task * imbn)) {
+	if (busiest->avg_load + scaled_busy_load_per_task >=
+			local->avg_load + (scaled_busy_load_per_task * imbn)) {
 		env->imbalance = busiest->load_per_task;
 		return;
 	}
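
The rearrangement is mathematically equivalent, but the load statistics in the scheduler are unsigned, so the old form's subtraction can wrap around to a huge value whenever the local group carries more load than the busiest group, making the test succeed spuriously. The following userspace sketch (not kernel code; the values are invented purely for illustration) shows the difference between the two forms of the comparison:

#include <stdio.h>

/*
 * Illustrative sketch: why local->avg_load is moved to the right-hand
 * side of the comparison.  With unsigned arithmetic, the old form's
 * subtraction wraps around when the local group is busier than the
 * busiest group; keeping both sides as sums avoids the underflow.
 */
int main(void)
{
	unsigned long busiest_avg_load = 100;
	unsigned long local_avg_load = 300;	/* local group is busier */
	unsigned long scaled_busy_load_per_task = 50;
	unsigned int imbn = 2;

	/* Old form: 100 - 300 wraps to a huge value, so the test is true. */
	int old_cond = (busiest_avg_load - local_avg_load +
			scaled_busy_load_per_task >=
			scaled_busy_load_per_task * imbn);

	/* New form: both sides stay in range, so the test is false. */
	int new_cond = (busiest_avg_load + scaled_busy_load_per_task >=
			local_avg_load + scaled_busy_load_per_task * imbn);

	printf("old condition: %d, new condition: %d\n", old_cond, new_cond);
	return 0;
}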