@@ -8464,15 +8464,13 @@ static void free_sched_groups(const struct cpumask *cpu_map,
  * there are asymmetries in the topology. If there are asymmetries, group
  * having more cpu_power will pickup more load compared to the group having
  * less cpu_power.
- *
- * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
- * the maximum number of tasks a group can handle in the presence of other idle
- * or lightly loaded groups in the same sched domain.
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
 	struct sched_domain *child;
 	struct sched_group *group;
+	long power;
+	int weight;
 
 	WARN_ON(!sd || !sd->groups);
 
@@ -8483,22 +8481,20 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 
 	sd->groups->__cpu_power = 0;
 
-	/*
-	 * For perf policy, if the groups in child domain share resources
-	 * (for example cores sharing some portions of the cache hierarchy
-	 * or SMT), then set this domain groups cpu_power such that each group
-	 * can handle only one task, when there are other idle groups in the
-	 * same sched domain.
-	 */
-	if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
-		       (child->flags &
-			(SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
-		sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
+	if (!child) {
+		power = SCHED_LOAD_SCALE;
+		weight = cpumask_weight(sched_domain_span(sd));
+		/*
+		 * SMT siblings share the power of a single core.
+		 */
+		if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1)
+			power /= weight;
+		sg_inc_cpu_power(sd->groups, power);
 		return;
 	}
 
 	/*
-	 * add cpu_power of each child group to this groups cpu_power
+	 * Add cpu_power of each child group to this groups cpu_power.
 	 */
 	group = child->groups;
 	do {
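
Below is a minimal standalone sketch (not part of the patch) of the arithmetic the new base case performs: at the lowest-level domain every group starts from SCHED_LOAD_SCALE, and SMT siblings split that budget evenly. group_power(), its parameters, and the hard-coded 1024 scale are illustrative assumptions for this sketch, not kernel API; in the tree, the weight comes from cpumask_weight(sched_domain_span(sd)) and the result feeds sg_inc_cpu_power().

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024L	/* assumed: 1L << 10, its value in kernels of this era */

/* Sketch of the patch's base case: divide one core's power among siblings. */
static long group_power(int shares_cpupower, int weight)
{
	long power = SCHED_LOAD_SCALE;

	/* SMT siblings share the power of a single core. */
	if (shares_cpupower && weight > 1)
		power /= weight;
	return power;
}

int main(void)
{
	printf("single-threaded core: %ld\n", group_power(0, 1));	/* 1024 */
	printf("2-way SMT sibling:    %ld\n", group_power(1, 2));	/*  512 */
	return 0;
}

Compared with the removed branch, which pinned cpu_power to a flat SCHED_LOAD_SCALE whenever child groups shared resources (outside SD_POWERSAVINGS_BALANCE), the new code only discounts power in SD_SHARE_CPUPOWER (SMT) domains; higher domains then accumulate real capacity through the child-summation loop that follows.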