|
@@ -1508,9 +1508,7 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_
		 * Pick the largest domain to update shares over
		 */
		tmp = sd;
-		if (affine_sd && (!tmp ||
-			  cpumask_weight(sched_domain_span(affine_sd)) >
-			  cpumask_weight(sched_domain_span(sd))))
+		if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
			tmp = affine_sd;

	if (tmp) {
|
@@ -1554,10 +1552,10 @@ select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
-		weight = cpumask_weight(sched_domain_span(sd));
+		weight = sd->span_weight;
		sd = NULL;
		for_each_domain(cpu, tmp) {
-			if (weight <= cpumask_weight(sched_domain_span(tmp)))
+			if (weight <= tmp->span_weight)
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
|
@@ -2243,7 +2241,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)

unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
-	unsigned long weight = cpumask_weight(sched_domain_span(sd));
+	unsigned long weight = sd->span_weight;
	unsigned long smt_gain = sd->smt_gain;

	smt_gain /= weight;
@@ -2276,7 +2274,7 @@ unsigned long scale_rt_power(int cpu)

static void update_cpu_power(struct sched_domain *sd, int cpu)
{
-	unsigned long weight = cpumask_weight(sched_domain_span(sd));
+	unsigned long weight = sd->span_weight;
	unsigned long power = SCHED_LOAD_SCALE;
	struct sched_group *sdg = sd->groups;
