@@ -2271,7 +2271,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2313,7 +2313,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3296,7 +3296,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3560,7 +3560,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -4100,7 +4100,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6832,7 +6832,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6843,7 +6843,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -7043,7 +7043,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -7068,7 +7068,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
@@ -7302,7 +7302,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7374,7 +7374,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7391,7 +7391,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7458,7 +7458,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7504,21 +7504,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
@@ -7538,7 +7538,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7621,7 +7621,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 	unregister_sched_domain_sysctl();
 
-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);
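
Note (not part of the patch): the change above is purely mechanical; every for_each_cpu_mask() in kernel/sched.c becomes for_each_cpu_mask_nr(). The motivation is that the _nr variant bounds the bitmap walk by the runtime nr_cpu_ids rather than the compile-time NR_CPUS, which matters for kernels built with a large NR_CPUS but booted on machines with few CPUs. The sketch below is a userspace approximation of that difference only; the macro bodies, helper names, and constants are illustrative assumptions, not the kernel's actual definitions.

#include <stdio.h>

#define NR_CPUS 4096			/* compile-time maximum (assumed value) */
static int nr_cpu_ids = 8;		/* runtime count of possible CPUs (assumed) */

#define BITS_PER_LONG (8 * sizeof(unsigned long))
static unsigned long cpu_mask[NR_CPUS / BITS_PER_LONG];

static int mask_test(int cpu)
{
	return (cpu_mask[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1;
}

static void mask_set(int cpu)
{
	cpu_mask[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

/* old style: always walks positions 0..NR_CPUS-1 of the mask */
#define for_each_cpu_sketch(cpu)				\
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++)		\
		if (mask_test(cpu))

/* _nr style: stops at the runtime bound nr_cpu_ids */
#define for_each_cpu_nr_sketch(cpu)				\
	for ((cpu) = 0; (cpu) < nr_cpu_ids; (cpu)++)		\
		if (mask_test(cpu))

int main(void)
{
	int cpu;

	mask_set(0);
	mask_set(3);

	for_each_cpu_nr_sketch(cpu)	/* scans 8 slots here instead of 4096 */
		printf("cpu %d is set in the mask\n", cpu);

	return 0;
}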