@@ -6837,7 +6837,6 @@ struct sd_data {
 
 struct s_data {
 	struct sched_domain ** __percpu sd;
-	struct sd_data sdd[SD_LV_MAX];
 	struct root_domain *rd;
 };
 
@@ -6848,12 +6847,15 @@ enum s_alloc {
 	sa_none,
 };
 
-typedef struct sched_domain *(*sched_domain_init_f)(struct s_data *d, int cpu);
+struct sched_domain_topology_level;
+
+typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
 struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f mask;
+	struct sd_data      data;
 };
 
 /*
@@ -6958,15 +6960,16 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 # define SD_INIT_NAME(sd, type) do { } while (0)
 #endif
 
-#define SD_INIT_FUNC(type)						\
-static noinline struct sched_domain *sd_init_##type(struct s_data *d, int cpu) \
-{									\
-	struct sched_domain *sd = *per_cpu_ptr(d->sdd[SD_LV_##type].sd, cpu); \
-	*sd = SD_##type##_INIT;						\
-	sd->level = SD_LV_##type;					\
-	SD_INIT_NAME(sd, type);						\
-	sd->private = &d->sdd[SD_LV_##type];				\
-	return sd;							\
+#define SD_INIT_FUNC(type)						\
+static noinline struct sched_domain *					\
+sd_init_##type(struct sched_domain_topology_level *tl, int cpu)	\
+{									\
+	struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu);	\
+	*sd = SD_##type##_INIT;						\
+	sd->level = SD_LV_##type;					\
+	SD_INIT_NAME(sd, type);						\
+	sd->private = &tl->data;					\
+	return sd;							\
 }
 
 SD_INIT_FUNC(CPU)
@@ -7019,11 +7022,12 @@ static void set_domain_attribute(struct sched_domain *sd,
 	}
 }
 
+static void __sdt_free(const struct cpumask *cpu_map);
+static int __sdt_alloc(const struct cpumask *cpu_map);
+
 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
				 const struct cpumask *cpu_map)
 {
-	int i, j;
-
 	switch (what) {
 	case sa_rootdomain:
		if (!atomic_read(&d->rd->refcount))
@@ -7031,14 +7035,7 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 	case sa_sd:
		free_percpu(d->sd); /* fall through */
 	case sa_sd_storage:
-		for (i = 0; i < SD_LV_MAX; i++) {
-			for_each_cpu(j, cpu_map) {
-				kfree(*per_cpu_ptr(d->sdd[i].sd, j));
-				kfree(*per_cpu_ptr(d->sdd[i].sg, j));
-			}
-			free_percpu(d->sdd[i].sd);
-			free_percpu(d->sdd[i].sg);
-		} /* fall through */
+		__sdt_free(cpu_map); /* fall through */
 	case sa_none:
		break;
 	}
@@ -7047,38 +7044,10 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
						   const struct cpumask *cpu_map)
 {
-	int i, j;
-
 	memset(d, 0, sizeof(*d));
 
-	for (i = 0; i < SD_LV_MAX; i++) {
-		d->sdd[i].sd = alloc_percpu(struct sched_domain *);
-		if (!d->sdd[i].sd)
-			return sa_sd_storage;
-
-		d->sdd[i].sg = alloc_percpu(struct sched_group *);
-		if (!d->sdd[i].sg)
-			return sa_sd_storage;
-
-		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd;
-			struct sched_group *sg;
-
-			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
-			if (!sd)
-				return sa_sd_storage;
-
-			*per_cpu_ptr(d->sdd[i].sd, j) = sd;
-
-			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
-					GFP_KERNEL, cpu_to_node(j));
-			if (!sg)
-				return sa_sd_storage;
-
-			*per_cpu_ptr(d->sdd[i].sg, j) = sg;
-		}
-	}
-
+	if (__sdt_alloc(cpu_map))
+		return sa_sd_storage;
 	d->sd = alloc_percpu(struct sched_domain *);
 	if (!d->sd)
		return sa_sd_storage;
@@ -7137,12 +7106,68 @@ static struct sched_domain_topology_level default_topology[] = {
 
 static struct sched_domain_topology_level *sched_domain_topology = default_topology;
 
+static int __sdt_alloc(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	int j;
+
+	for (tl = sched_domain_topology; tl->init; tl++) {
+		struct sd_data *sdd = &tl->data;
+
+		sdd->sd = alloc_percpu(struct sched_domain *);
+		if (!sdd->sd)
+			return -ENOMEM;
+
+		sdd->sg = alloc_percpu(struct sched_group *);
+		if (!sdd->sg)
+			return -ENOMEM;
+
+		for_each_cpu(j, cpu_map) {
+			struct sched_domain *sd;
+			struct sched_group *sg;
+
+			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sd)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sd, j) = sd;
+
+			sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sg)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sg, j) = sg;
+		}
+	}
+
+	return 0;
+}
+
+static void __sdt_free(const struct cpumask *cpu_map)
+{
+	struct sched_domain_topology_level *tl;
+	int j;
+
+	for (tl = sched_domain_topology; tl->init; tl++) {
+		struct sd_data *sdd = &tl->data;
+
+		for_each_cpu(j, cpu_map) {
+			kfree(*per_cpu_ptr(sdd->sd, j));
+			kfree(*per_cpu_ptr(sdd->sg, j));
+		}
+		free_percpu(sdd->sd);
+		free_percpu(sdd->sg);
+	}
+}
+
 struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
		struct s_data *d, const struct cpumask *cpu_map,
		struct sched_domain_attr *attr, struct sched_domain *child,
		int cpu)
 {
-	struct sched_domain *sd = tl->init(d, cpu);
+	struct sched_domain *sd = tl->init(tl, cpu);
 	if (!sd)
		return child;
 
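
For readers following the patch outside the tree: the structural move is that
per-level domain/group storage leaves the fixed s_data.sdd[SD_LV_MAX] array
and becomes the struct sd_data member of each sched_domain_topology_level
entry, with __sdt_alloc()/__sdt_free() walking the sched_domain_topology
table until they reach the entry whose ->init pointer is NULL. Below is a
minimal userspace sketch of that sentinel-terminated walk, not kernel code:
plain calloc()/free() stand in for the percpu and node-aware allocators, and
the names toy_*, NCPUS, and the level list are invented for the illustration.

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4				/* hypothetical stand-in for cpu_map */

struct toy_domain { int level; };

/*
 * Mirrors sched_domain_topology_level: each level carries its own
 * per-CPU storage, and a NULL name terminates the table the way a
 * NULL ->init terminates sched_domain_topology in the patch.
 */
struct toy_topology_level {
	const char *name;
	struct toy_domain *percpu[NCPUS];	/* stands in for sd_data */
};

static struct toy_topology_level toy_topology[] = {
	{ .name = "CPU" },
	{ .name = "NODE" },
	{ NULL },
};

/* Shaped like __sdt_alloc(): allocate per-CPU storage for every level. */
static int toy_alloc(void)
{
	struct toy_topology_level *tl;
	int j;

	for (tl = toy_topology; tl->name; tl++) {
		for (j = 0; j < NCPUS; j++) {
			tl->percpu[j] = calloc(1, sizeof(struct toy_domain));
			if (!tl->percpu[j])
				return -1;	/* caller unwinds via toy_free() */
		}
	}
	return 0;
}

/*
 * Shaped like __sdt_free(): safe to call after a partial toy_alloc()
 * failure because the table is zero-initialized and free(NULL) is a
 * no-op, matching how __free_domain_allocs() funnels the sa_sd_storage
 * error path through __sdt_free() in the patch.
 */
static void toy_free(void)
{
	struct toy_topology_level *tl;
	int j;

	for (tl = toy_topology; tl->name; tl++)
		for (j = 0; j < NCPUS; j++)
			free(tl->percpu[j]);
}

int main(void)
{
	if (toy_alloc()) {
		toy_free();
		return 1;
	}
	printf("allocated per-CPU storage for each topology level\n");
	toy_free();
	return 0;
}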