@@ -5565,6 +5565,9 @@ static struct sched_domain_topology_level default_topology[] = {
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+#define for_each_sd_topology(tl)			\
+	for (tl = sched_domain_topology; tl->init; tl++)
+
 #ifdef CONFIG_NUMA
 
 static int sched_domains_numa_levels;
@@ -5862,7 +5865,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 	struct sched_domain_topology_level *tl;
 	int j;
 
-	for (tl = sched_domain_topology; tl->init; tl++) {
+	for_each_sd_topology(tl) {
 		struct sd_data *sdd = &tl->data;
 
 		sdd->sd = alloc_percpu(struct sched_domain *);
@@ -5915,7 +5918,7 @@ static void __sdt_free(const struct cpumask *cpu_map)
 	struct sched_domain_topology_level *tl;
 	int j;
 
-	for (tl = sched_domain_topology; tl->init; tl++) {
+	for_each_sd_topology(tl) {
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
@@ -5983,7 +5986,7 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_domain_topology_level *tl;
 
 		sd = NULL;
-		for (tl = sched_domain_topology; tl->init; tl++) {
+		for_each_sd_topology(tl) {
			sd = build_sched_domain(tl, cpu_map, attr, sd, i);
			if (tl == sched_domain_topology)
				*per_cpu_ptr(d.sd, i) = sd;
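
For illustration only, not part of the patch: a minimal user-space sketch of the
sentinel-terminated walk that for_each_sd_topology() wraps. The struct layout,
topology_level, for_each_topology() and dummy_init() below are simplified
stand-ins invented for the example, not the kernel's actual definitions.

#include <stdio.h>

/* Simplified stand-in for sched_domain_topology_level: the table is
 * terminated by an entry whose ->init pointer is NULL. */
struct topology_level {
	const char *name;
	int (*init)(void);		/* NULL marks the end of the table */
};

static int dummy_init(void) { return 0; }

static struct topology_level default_levels[] = {
	{ "SMT", dummy_init },
	{ "MC",  dummy_init },
	{ "CPU", dummy_init },
	{ NULL,  NULL },		/* sentinel: tl->init == NULL stops the walk */
};

static struct topology_level *levels = default_levels;

/* Same shape as the new macro: hide the open-coded sentinel loop. */
#define for_each_topology(tl) \
	for (tl = levels; tl->init; tl++)

int main(void)
{
	struct topology_level *tl;

	for_each_topology(tl)
		printf("level: %s\n", tl->name);

	return 0;
}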