@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
 	}
 
 	if (!se) {
-		cfs_rq->h_load = rq->avg.load_avg_contrib;
+		cfs_rq->h_load = cfs_rq->runnable_load_avg;
 		cfs_rq->last_h_load_update = now;
 	}
 
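Note on the hunk above: h_load for a group cfs_rq is derived by walking down the hierarchy, scaling the parent's h_load by each entity's share of the parent's runnable load, and task_h_load() finally scales by the task's own contribution. Seeding the root with its runnable_load_avg (rather than rq->avg.load_avg_contrib) keeps the whole chain in one unit. A toy two-level calculation, assuming that propagation shape; the names and numbers here are illustrative, not taken from the patch:

#include <stdio.h>

int main(void)
{
	/* Illustrative values only, not kernel state. */
	unsigned long root_runnable  = 2048;	/* root cfs_rq->runnable_load_avg */
	unsigned long group_contrib  = 1024;	/* group se->avg.load_avg_contrib */
	unsigned long group_runnable =  512;	/* group cfs_rq->runnable_load_avg */
	unsigned long task_contrib   =  256;	/* task se->avg.load_avg_contrib */

	/* Seed the root with its own runnable load (the '+' line above),
	 * then scale by the group entity's share of the root's runnable load. */
	unsigned long group_h_load = root_runnable * group_contrib / (root_runnable + 1);

	/* task_h_load() then scales by the task's share of its group. */
	unsigned long task_h_load = group_h_load * task_contrib / (group_runnable + 1);

	printf("group h_load=%lu, task h_load=%lu\n", group_h_load, task_h_load);
	return 0;
}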
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
 		(busiest->load_per_task * SCHED_POWER_SCALE) /
 		busiest->group_power;
 
-	if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
-	    (scaled_busy_load_per_task * imbn)) {
+	if (busiest->avg_load + scaled_busy_load_per_task >=
+	    local->avg_load + (scaled_busy_load_per_task * imbn)) {
 		env->imbalance = busiest->load_per_task;
 		return;
 	}
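Note on the hunk above: avg_load and the load_per_task-derived terms are unsigned, so the old busiest->avg_load - local->avg_load can wrap to a huge value whenever the local group is actually the more loaded one, making the early return fire spuriously. Moving local->avg_load to the other side is the same inequality algebraically, but nothing can go negative. A standalone sketch of the wraparound, with made-up values:

#include <stdio.h>

int main(void)
{
	/* Made-up loads where the local group is busier than "busiest". */
	unsigned long busiest_avg = 100, local_avg = 300;
	unsigned long scaled_busy_load_per_task = 50;
	unsigned int imbn = 2;

	/* Old form: the subtraction wraps around, the sum becomes huge,
	 * and the condition is (wrongly) true. */
	int old_form = (busiest_avg - local_avg + scaled_busy_load_per_task >=
			scaled_busy_load_per_task * imbn);

	/* New form: local_avg moved to the right-hand side, so no
	 * subtraction happens and the comparison behaves as intended. */
	int new_form = (busiest_avg + scaled_busy_load_per_task >=
			local_avg + scaled_busy_load_per_task * imbn);

	printf("old=%d new=%d\n", old_form, new_form);	/* prints old=1 new=0 */
	return 0;
}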
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	 * max load less than avg load(as we skip the groups at or below
 	 * its cpu_power, while calculating max_load..)
 	 */
-	if (busiest->avg_load < sds->avg_load) {
+	if (busiest->avg_load <= sds->avg_load ||
+	    local->avg_load >= sds->avg_load) {
 		env->imbalance = 0;
 		return fix_small_imbalance(env, sds);
 	}
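Note on the hunk above: calculate_imbalance() goes on, in the unchanged code below this hunk, to cap the amount of load to pull with a term along the lines of (sds->avg_load - local->avg_load) * local->group_power. If that reading of the surrounding code is right, the added local->avg_load >= sds->avg_load test keeps that unsigned subtraction from wrapping when the local group already sits at or above the domain average, routing such cases to fix_small_imbalance() instead. A standalone sketch of why the cap would otherwise vanish; the helper and values are illustrative:

#include <stdio.h>

#define SCHED_POWER_SCALE 1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Local group already above the domain average: nothing should move. */
	unsigned long sds_avg = 500, local_avg = 600;
	unsigned long max_pull = 200;
	unsigned long busiest_power = 1024, local_power = 1024;

	/* (sds_avg - local_avg) wraps to a huge value, so the second operand
	 * never limits the result and a positive "imbalance" pops out. */
	unsigned long imbalance = min_ul(max_pull * busiest_power,
					 (sds_avg - local_avg) * local_power)
				  / SCHED_POWER_SCALE;

	printf("imbalance=%lu (expected 0)\n", imbalance);
	return 0;
}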