@@ -2097,6 +2097,7 @@ struct sd_lb_stats {
 	unsigned long max_load;
 	unsigned long busiest_load_per_task;
 	unsigned long busiest_nr_running;
+	unsigned long busiest_group_capacity;
 
 	int group_imb; /* Is there imbalance in this sd */
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2416,14 +2417,12 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	unsigned long load, max_cpu_load, min_cpu_load;
 	int i;
 	unsigned int balance_cpu = -1, first_idle_cpu = 0;
-	unsigned long sum_avg_load_per_task;
-	unsigned long avg_load_per_task;
+	unsigned long avg_load_per_task = 0;
 
 	if (local_group)
 		balance_cpu = group_first_cpu(group);
 
 	/* Tally up the load of all CPUs in the group */
-	sum_avg_load_per_task = avg_load_per_task = 0;
 	max_cpu_load = 0;
 	min_cpu_load = ~0UL;
 
@@ -2453,7 +2452,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 		sgs->sum_nr_running += rq->nr_running;
 		sgs->sum_weighted_load += weighted_cpuload(i);
 
-		sum_avg_load_per_task += cpu_avg_load_per_task(i);
 	}
 
 	/*
@@ -2473,7 +2471,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	/* Adjust by relative CPU power of the group */
 	sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
-
 	/*
 	 * Consider the group unbalanced when the imbalance is larger
 	 * than the average weight of two tasks.
@@ -2483,8 +2480,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	 * normalized nr_running number somewhere that negates
 	 * the hierarchy?
 	 */
-	avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
-		group->cpu_power;
+	if (sgs->sum_nr_running)
+		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
 	if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
 		sgs->group_imb = 1;
@@ -2553,6 +2550,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 			sds->max_load = sgs.avg_load;
 			sds->busiest = group;
 			sds->busiest_nr_running = sgs.sum_nr_running;
+			sds->busiest_group_capacity = sgs.group_capacity;
 			sds->busiest_load_per_task = sgs.sum_weighted_load;
 			sds->group_imb = sgs.group_imb;
 		}
@@ -2575,6 +2573,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 {
 	unsigned long tmp, pwr_now = 0, pwr_move = 0;
 	unsigned int imbn = 2;
+	unsigned long scaled_busy_load_per_task;
 
 	if (sds->this_nr_running) {
 		sds->this_load_per_task /= sds->this_nr_running;
@@ -2585,8 +2584,12 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 		sds->this_load_per_task =
 			cpu_avg_load_per_task(this_cpu);
 
-	if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
-			sds->busiest_load_per_task * imbn) {
+	scaled_busy_load_per_task = sds->busiest_load_per_task
+						 * SCHED_LOAD_SCALE;
+	scaled_busy_load_per_task /= sds->busiest->cpu_power;
+
+	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
+			(scaled_busy_load_per_task * imbn)) {
 		*imbalance = sds->busiest_load_per_task;
 		return;
 	}
@@ -2637,7 +2640,14 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 		unsigned long *imbalance)
 {
-	unsigned long max_pull;
+	unsigned long max_pull, load_above_capacity = ~0UL;
+
+	sds->busiest_load_per_task /= sds->busiest_nr_running;
+	if (sds->group_imb) {
+		sds->busiest_load_per_task =
+			min(sds->busiest_load_per_task, sds->avg_load);
+	}
+
 	/*
 	 * In the presence of smp nice balancing, certain scenarios can have
 	 * max load less than avg load(as we skip the groups at or below
@@ -2648,9 +2658,29 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 		return fix_small_imbalance(sds, this_cpu, imbalance);
 	}
 
-	/* Don't want to pull so many tasks that a group would go idle */
-	max_pull = min(sds->max_load - sds->avg_load,
-			sds->max_load - sds->busiest_load_per_task);
+	if (!sds->group_imb) {
+		/*
+		 * Don't want to pull so many tasks that a group would go idle.
+		 */
+		load_above_capacity = (sds->busiest_nr_running -
+						sds->busiest_group_capacity);
+
+		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
+
+		load_above_capacity /= sds->busiest->cpu_power;
+	}
+
+	/*
+	 * We're trying to get all the cpus to the average_load, so we don't
+	 * want to push ourselves above the average load, nor do we wish to
+	 * reduce the max loaded cpu below the average load. At the same time,
+	 * we also don't want to reduce the group load below the group capacity
+	 * (so that we can implement power-savings policies etc). Thus we look
+	 * for the minimum possible imbalance.
+	 * Be careful of negative numbers as they'll appear as very large values
+	 * with unsigned longs.
+	 */
+	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
 	/* How much load to actually move to equalise the imbalance */
 	*imbalance = min(max_pull * sds->busiest->cpu_power,
@@ -2718,7 +2748,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * 4) This group is more busy than the avg busieness at this
 	 *    sched_domain.
 	 * 5) The imbalance is within the specified limit.
-	 * 6) Any rebalance would lead to ping-pong
 	 */
 	if (!(*balance))
 		goto ret;
@@ -2737,25 +2766,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
 		goto out_balanced;
 
-	sds.busiest_load_per_task /= sds.busiest_nr_running;
-	if (sds.group_imb)
-		sds.busiest_load_per_task =
-			min(sds.busiest_load_per_task, sds.avg_load);
-
-	/*
-	 * We're trying to get all the cpus to the average_load, so we don't
-	 * want to push ourselves above the average load, nor do we wish to
-	 * reduce the max loaded cpu below the average load, as either of these
-	 * actions would just result in more rebalancing later, and ping-pong
-	 * tasks around. Thus we look for the minimum possible imbalance.
-	 * Negative imbalances (*we* are more loaded than anyone else) will
-	 * be counted as no imbalance for these purposes -- we can't fix that
-	 * by pulling tasks to us. Be careful of negative numbers as they'll
-	 * appear as very large values with unsigned longs.
-	 */
-	if (sds.max_load <= sds.busiest_load_per_task)
-		goto out_balanced;
-
 	/* Looks like there is an imbalance. Compute it */
 	calculate_imbalance(&sds, this_cpu, imbalance);
 	return sds.busiest;
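
To make the new calculate_imbalance() arithmetic easier to follow, here is a small userspace sketch, separate from the patch itself, that plugs hypothetical numbers into the same formulas: one SMT core running two nice-0 tasks next to an idle sibling core, with an assumed cpu_power of 1178 and a group capacity of 1. The helper min_ul() and all the concrete values are illustrative only; the kernel uses its own min() and the real sd_lb_stats/sg_lb_stats fields.

/*
 * Standalone sketch (not kernel code) of the imbalance math added above.
 * All inputs are hypothetical. Build with: gcc -o imbalance imbalance.c
 */
#include <stdio.h>

#define SCHED_LOAD_SCALE	1024UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical busiest group: two nice-0 tasks on one SMT core. */
	unsigned long busiest_nr_running = 2;
	unsigned long busiest_group_capacity = 1;
	unsigned long busiest_cpu_power = 1178;	/* assumed power of 2 siblings */
	unsigned long group_load = 2 * 1024;	/* two tasks of weight 1024 */

	/* Hypothetical local group: an idle SMT core of the same power. */
	unsigned long this_load = 0;
	unsigned long this_cpu_power = 1178;

	/* Group and domain loads, normalised by cpu_power as in the patch. */
	unsigned long max_load = group_load * SCHED_LOAD_SCALE / busiest_cpu_power;
	unsigned long avg_load = group_load * SCHED_LOAD_SCALE /
					(busiest_cpu_power + this_cpu_power);

	/* Load carried beyond the group's task capacity, power-scaled. */
	unsigned long load_above_capacity =
		(busiest_nr_running - busiest_group_capacity) *
		SCHED_LOAD_SCALE * SCHED_LOAD_SCALE / busiest_cpu_power;

	/* Never pull more than the excess over the domain average. */
	unsigned long max_pull = min_ul(max_load - avg_load, load_above_capacity);

	/* Convert back from normalised load to the task weight to move. */
	unsigned long imbalance = min_ul(max_pull * busiest_cpu_power,
				(avg_load - this_load) * this_cpu_power) /
				SCHED_LOAD_SCALE;

	printf("max_load=%lu avg_load=%lu load_above_capacity=%lu\n",
	       max_load, avg_load, load_above_capacity);
	printf("max_pull=%lu imbalance=%lu (~one nice-0 task of weight 1024)\n",
	       max_pull, imbalance);
	return 0;
}

With these assumed inputs, load_above_capacity and max_load - avg_load both come out near 890, so the resulting imbalance is roughly one task's weight and the idle core can pull one of the two siblings' tasks. The key point visible in the diff is that busiest_load_per_task is a raw task weight while max_load is cpu_power-scaled, so the new code scales it (scaled_busy_load_per_task) before comparing, and caps the pull by the group's load above its capacity rather than by max_load - busiest_load_per_task.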