@@ -1422,9 +1422,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 	rcu_read_lock();
 	list_for_each_entry(tg, &task_groups, list) {
-		long imbalance;
-		unsigned long this_weight, busiest_weight;
-		long rem_load, max_load, moved_load;
+		long rem_load, moved_load;
 
 		/*
 		 * empty group
@@ -1435,17 +1433,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		rem_load = rem_load_move * aggregate(tg, this_cpu)->rq_weight;
 		rem_load /= aggregate(tg, this_cpu)->load + 1;
 
-		this_weight = tg->cfs_rq[this_cpu]->task_weight;
-		busiest_weight = tg->cfs_rq[busiest_cpu]->task_weight;
-
-		imbalance = (busiest_weight - this_weight) / 2;
-
-		if (imbalance < 0)
-			imbalance = busiest_weight;
-
-		max_load = max(rem_load, imbalance);
 		moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
-				max_load, sd, idle, all_pinned, this_best_prio,
+				rem_load, sd, idle, all_pinned, this_best_prio,
 				tg->cfs_rq[busiest_cpu]);
 
 		if (!moved_load)