@@ -6557,7 +6557,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!group->cpu_power) {
+		if (!group->sgp->power) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
@@ -6581,9 +6581,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
 
 		printk(KERN_CONT " %s", str);
-		if (group->cpu_power != SCHED_POWER_SCALE) {
+		if (group->sgp->power != SCHED_POWER_SCALE) {
 			printk(KERN_CONT " (cpu_power = %d)",
-				group->cpu_power);
+				group->sgp->power);
 		}
 
 		group = group->next;
@@ -6774,11 +6774,39 @@ static struct root_domain *alloc_rootdomain(void)
 	return rd;
 }
 
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+	struct sched_group *tmp, *first;
+
+	if (!sg)
+		return;
+
+	first = sg;
+	do {
+		tmp = sg->next;
+
+		if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+			kfree(sg->sgp);
+
+		kfree(sg);
+		sg = tmp;
+	} while (sg != first);
+}
+
 static void free_sched_domain(struct rcu_head *rcu)
 {
 	struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
-	if (atomic_dec_and_test(&sd->groups->ref))
+
+	/*
+	 * If its an overlapping domain it has private groups, iterate and
+	 * nuke them all.
+	 */
+	if (sd->flags & SD_OVERLAP) {
+		free_sched_groups(sd->groups, 1);
+	} else if (atomic_dec_and_test(&sd->groups->ref)) {
+		kfree(sd->groups->sgp);
 		kfree(sd->groups);
+	}
 	kfree(sd);
 }
 
@@ -6945,6 +6973,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 struct sd_data {
 	struct sched_domain **__percpu sd;
 	struct sched_group **__percpu sg;
+	struct sched_group_power **__percpu sgp;
 };
 
 struct s_data {
@@ -6964,15 +6993,73 @@ struct sched_domain_topology_level;
 typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
 
+#define SDTL_OVERLAP	0x01
+
 struct sched_domain_topology_level {
 	sched_domain_init_f init;
 	sched_domain_mask_f mask;
+	int		    flags;
 	struct sd_data      data;
 };
 
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+	struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+	const struct cpumask *span = sched_domain_span(sd);
+	struct cpumask *covered = sched_domains_tmpmask;
+	struct sd_data *sdd = sd->private;
+	struct sched_domain *child;
+	int i;
+
+	cpumask_clear(covered);
+
+	for_each_cpu(i, span) {
+		struct cpumask *sg_span;
+
+		if (cpumask_test_cpu(i, covered))
+			continue;
+
+		sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+				GFP_KERNEL, cpu_to_node(i));
+
+		if (!sg)
+			goto fail;
+
+		sg_span = sched_group_cpus(sg);
+
+		child = *per_cpu_ptr(sdd->sd, i);
+		if (child->child) {
+			child = child->child;
+			cpumask_copy(sg_span, sched_domain_span(child));
+		} else
+			cpumask_set_cpu(i, sg_span);
+
+		cpumask_or(covered, covered, sg_span);
+
+		sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+		atomic_inc(&sg->sgp->ref);
+
+		if (cpumask_test_cpu(cpu, sg_span))
+			groups = sg;
+
+		if (!first)
+			first = sg;
+		if (last)
+			last->next = sg;
+		last = sg;
+		last->next = first;
+	}
+	sd->groups = groups;
+
+	return 0;
+
+fail:
+	free_sched_groups(first, 0);
+
+	return -ENOMEM;
+}
+
 static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 {
 	struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -6981,24 +7068,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
 	if (child)
 		cpu = cpumask_first(sched_domain_span(child));
 
-	if (sg)
+	if (sg) {
 		*sg = *per_cpu_ptr(sdd->sg, cpu);
+		(*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+		atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+	}
 
 	return cpu;
 }
 
 /*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
  * build_sched_groups will build a circular linked list of the groups
  * covered by the given span, and will set each group's ->cpumask correctly,
  * and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
  */
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
 {
 	struct sched_group *first = NULL, *last = NULL;
 	struct sd_data *sdd = sd->private;
@@ -7006,6 +7093,12 @@ build_sched_groups(struct sched_domain *sd)
 	struct cpumask *covered;
 	int i;
 
+	get_group(cpu, sdd, &sd->groups);
+	atomic_inc(&sd->groups->ref);
+
+	if (cpu != cpumask_first(sched_domain_span(sd)))
+		return 0;
+
 	lockdep_assert_held(&sched_domains_mutex);
 	covered = sched_domains_tmpmask;
 
@@ -7020,7 +7113,7 @@ build_sched_groups(struct sched_domain *sd)
 			continue;
 
 		cpumask_clear(sched_group_cpus(sg));
-		sg->cpu_power = 0;
+		sg->sgp->power = 0;
 
 		for_each_cpu(j, span) {
 			if (get_group(j, sdd, NULL) != group)
@@ -7037,6 +7130,8 @@ build_sched_groups(struct sched_domain *sd)
 		last = sg;
 	}
 	last->next = first;
+
+	return 0;
 }
 
 /*
@@ -7051,12 +7146,17 @@ build_sched_groups(struct sched_domain *sd)
  */
 static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 {
-	WARN_ON(!sd || !sd->groups);
+	struct sched_group *sg = sd->groups;
 
-	if (cpu != group_first_cpu(sd->groups))
-		return;
+	WARN_ON(!sd || !sg);
 
-	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+	do {
+		sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+		sg = sg->next;
+	} while (sg != sd->groups);
+
+	if (cpu != group_first_cpu(sg))
+		return;
 
 	update_group_power(sd, cpu);
 }
@@ -7177,15 +7277,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 static void claim_allocations(int cpu, struct sched_domain *sd)
 {
 	struct sd_data *sdd = sd->private;
-	struct sched_group *sg = sd->groups;
 
 	WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
 	*per_cpu_ptr(sdd->sd, cpu) = NULL;
 
-	if (cpu == cpumask_first(sched_group_cpus(sg))) {
-		WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+	if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
 		*per_cpu_ptr(sdd->sg, cpu) = NULL;
-	}
+
+	if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+		*per_cpu_ptr(sdd->sgp, cpu) = NULL;
 }
 
 #ifdef CONFIG_SCHED_SMT
@@ -7210,7 +7310,7 @@ static struct sched_domain_topology_level default_topology[] = {
 #endif
 	{ sd_init_CPU, cpu_cpu_mask, },
 #ifdef CONFIG_NUMA
-	{ sd_init_NODE, cpu_node_mask, },
+	{ sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
 	{ sd_init_ALLNODES, cpu_allnodes_mask, },
 #endif
 	{ NULL, },
@@ -7234,9 +7334,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 		if (!sdd->sg)
 			return -ENOMEM;
 
+		sdd->sgp = alloc_percpu(struct sched_group_power *);
+		if (!sdd->sgp)
+			return -ENOMEM;
+
 		for_each_cpu(j, cpu_map) {
 			struct sched_domain *sd;
 			struct sched_group *sg;
+			struct sched_group_power *sgp;
 
 			sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
 					GFP_KERNEL, cpu_to_node(j));
@@ -7251,6 +7356,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
 				return -ENOMEM;
 
 			*per_cpu_ptr(sdd->sg, j) = sg;
+
+			sgp = kzalloc_node(sizeof(struct sched_group_power),
+					GFP_KERNEL, cpu_to_node(j));
+			if (!sgp)
+				return -ENOMEM;
+
+			*per_cpu_ptr(sdd->sgp, j) = sgp;
 		}
 	}
 
@@ -7266,11 +7378,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			kfree(*per_cpu_ptr(sdd->sd, j));
+			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+			if (sd && (sd->flags & SD_OVERLAP))
+				free_sched_groups(sd->groups, 0);
 			kfree(*per_cpu_ptr(sdd->sg, j));
+			kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
 		free_percpu(sdd->sg);
+		free_percpu(sdd->sgp);
 	}
 }
 
@@ -7316,8 +7432,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 		struct sched_domain_topology_level *tl;
 
 		sd = NULL;
-		for (tl = sched_domain_topology; tl->init; tl++)
+		for (tl = sched_domain_topology; tl->init; tl++) {
 			sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+			if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+				sd->flags |= SD_OVERLAP;
+			if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+				break;
+		}
 
 		while (sd->child)
 			sd = sd->child;
@@ -7329,13 +7450,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 	for_each_cpu(i, cpu_map) {
 		for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
 			sd->span_weight = cpumask_weight(sched_domain_span(sd));
-			get_group(i, sd->private, &sd->groups);
-			atomic_inc(&sd->groups->ref);
-
-			if (i != cpumask_first(sched_domain_span(sd)))
-				continue;
-
-			build_sched_groups(sd);
+			if (sd->flags & SD_OVERLAP) {
+				if (build_overlap_sched_groups(sd, i))
+					goto error;
+			} else {
+				if (build_sched_groups(sd, i))
+					goto error;
+			}
 		}
 	}
 
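The hunks above use struct sched_group_power via sg->sgp but never show its definition. As a rough illustration only, here is a small self-contained userspace sketch of the ownership model the patch sets up: several group descriptors share one reference-counted power structure, and the last reference dropped while walking the circular ->next list frees it, mirroring free_sched_groups(..., free_sgp). The names group, group_power and free_groups are hypothetical stand-ins, not kernel or patch APIs; error handling is omitted.

/*
 * Userspace model (C11) of the shared, refcounted power structure:
 * two "groups" point at one "group_power", analogous to sg->sgp.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct group_power {			/* stands in for struct sched_group_power */
	atomic_int ref;
	unsigned int power;
};

struct group {				/* stands in for struct sched_group */
	struct group *next;		/* circular list, like sg->next */
	struct group_power *gp;		/* shared, refcounted, like sg->sgp */
};

/* Mirrors free_sched_groups(): walk the ring, drop power refs, free groups. */
static void free_groups(struct group *g, int free_gp)
{
	struct group *tmp, *first = g;

	if (!g)
		return;
	do {
		tmp = g->next;
		/* atomic_fetch_sub() returns the old value: old == 1 means we
		 * dropped the last reference, like atomic_dec_and_test(). */
		if (free_gp && atomic_fetch_sub(&g->gp->ref, 1) == 1)
			free(g->gp);
		free(g);
		g = tmp;
	} while (g != first);
}

int main(void)
{
	struct group_power *gp = calloc(1, sizeof(*gp));
	struct group *a = calloc(1, sizeof(*a));
	struct group *b = calloc(1, sizeof(*b));

	gp->power = 1024;		/* e.g. SCHED_POWER_SCALE */

	a->gp = b->gp = gp;		/* two groups share one power struct */
	atomic_store(&gp->ref, 2);

	a->next = b;			/* build the circular ->next ring */
	b->next = a;

	printf("shared power = %u\n", a->gp->power);
	free_groups(a, 1);		/* last ref drop also frees gp */
	return 0;
}

Built with any C11 compiler, this prints the shared power value and then tears down both groups and the single shared power structure exactly once, which is the invariant the patch relies on for both the per-CPU (get_group/claim_allocations) and the overlapping (build_overlap_sched_groups) cases.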