@@ -7282,10 +7282,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  * groups, so roll our own. Now each node has its own list of groups which
  * gets dynamically allocated.
  */
-static DEFINE_PER_CPU(struct sched_domain, node_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
 static struct sched_group ***sched_group_nodes_bycpu;
 
-static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
 
 static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
@@ -7560,7 +7560,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_NUMA
 		if (cpumask_weight(cpu_map) >
 				SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
-			sd = &per_cpu(allnodes_domains, i);
+			sd = &per_cpu(allnodes_domains, i).sd;
 			SD_INIT(sd, ALLNODES);
 			set_domain_attribute(sd, attr);
 			cpumask_copy(sched_domain_span(sd), cpu_map);
@@ -7570,7 +7570,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		} else
 			p = NULL;
 
-		sd = &per_cpu(node_domains, i);
+		sd = &per_cpu(node_domains, i).sd;
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
 		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
@@ -7688,7 +7688,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		for_each_cpu(j, nodemask) {
 			struct sched_domain *sd;
 
-			sd = &per_cpu(node_domains, j);
+			sd = &per_cpu(node_domains, j).sd;
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
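
For context, the new `.sd` accesses work because the per-cpu NUMA domain variables are no longer bare `struct sched_domain` objects but wrappers that embed the domain next to statically sized cpumask storage. The definition below is only a sketch of what that wrapper presumably looks like, mirroring the `struct static_sched_group` pattern already referenced in these hunks; the exact field names are assumptions, not part of the diff shown above.

/*
 * Assumed shape of the wrapper (not part of the hunks above): the
 * embedded sched_domain comes first, followed by bitmap storage for
 * its span, so &per_cpu(node_domains, i).sd yields the
 * struct sched_domain * that SD_INIT() and set_domain_attribute()
 * expect.
 */
struct static_sched_domain {
	struct sched_domain sd;
	DECLARE_BITMAP(span, CONFIG_NR_CPUS);
};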