@@ -6506,6 +6506,7 @@ struct s_data {
 	cpumask_var_t		nodemask;
 	cpumask_var_t		this_sibling_map;
 	cpumask_var_t		this_core_map;
+	cpumask_var_t		this_book_map;
 	cpumask_var_t		send_covered;
 	cpumask_var_t		tmpmask;
 	struct sched_group	**sched_group_nodes;
@@ -6517,6 +6518,7 @@ enum s_alloc {
 	sa_rootdomain,
 	sa_tmpmask,
 	sa_send_covered,
+	sa_this_book_map,
 	sa_this_core_map,
 	sa_this_sibling_map,
 	sa_nodemask,
@@ -6570,6 +6572,31 @@ cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 }
 #endif /* CONFIG_SCHED_MC */
 
+/*
+ * book sched-domains:
+ */
+#ifdef CONFIG_SCHED_BOOK
+static DEFINE_PER_CPU(struct static_sched_domain, book_domains);
+static DEFINE_PER_CPU(struct static_sched_group, sched_group_book);
+
+static int
+cpu_to_book_group(int cpu, const struct cpumask *cpu_map,
+		  struct sched_group **sg, struct cpumask *mask)
+{
+	int group = cpu;
+#ifdef CONFIG_SCHED_MC
+	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
+	group = cpumask_first(mask);
+#elif defined(CONFIG_SCHED_SMT)
+	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
+	group = cpumask_first(mask);
+#endif
+	if (sg)
+		*sg = &per_cpu(sched_group_book, group).sg;
+	return group;
+}
+#endif /* CONFIG_SCHED_BOOK */
+
 static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
 
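NOTE: cpu_to_book_group() above follows the same representative-CPU convention as the existing cpu_to_core_group() and cpu_to_phys_group() helpers: the sched_groups inside a book-level domain are the next-lower units (core groups under CONFIG_SCHED_MC, sibling-thread sets under CONFIG_SCHED_SMT, single CPUs otherwise), and each such group is owned by its first online CPU, so every CPU in the unit resolves to the same per-CPU sched_group_book slot. A minimal, runnable userspace sketch of the idea, with a hypothetical coregroup_mask() standing in for the kernel's topology masks:

	#include <stdint.h>
	#include <stdio.h>

	/* 64-bit words stand in for struct cpumask */
	static int mask_first(uint64_t m)
	{
		return m ? __builtin_ctzll(m) : -1;	/* like cpumask_first() */
	}

	/* hypothetical topology: CPUs 0-3 share one core group, 4-7 another */
	static uint64_t coregroup_mask(int cpu)
	{
		return cpu < 4 ? 0x0fULL : 0xf0ULL;
	}

	int main(void)
	{
		uint64_t cpu_map = 0xffULL;	/* all eight CPUs online */
		int cpu;

		/* CPUs 0-3 resolve to owner 0, CPUs 4-7 to owner 4 */
		for (cpu = 0; cpu < 8; cpu++)
			printf("cpu %d -> group owned by cpu %d\n",
			       cpu, mask_first(coregroup_mask(cpu) & cpu_map));
		return 0;
	}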
@@ -6578,7 +6605,10 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 		  struct sched_group **sg, struct cpumask *mask)
 {
 	int group;
-#ifdef CONFIG_SCHED_MC
+#ifdef CONFIG_SCHED_BOOK
+	cpumask_and(mask, cpu_book_mask(cpu), cpu_map);
+	group = cpumask_first(mask);
+#elif defined(CONFIG_SCHED_MC)
 	cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
 	group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
@@ -6839,6 +6869,9 @@ SD_INIT_FUNC(CPU)
 #ifdef CONFIG_SCHED_MC
  SD_INIT_FUNC(MC)
 #endif
+#ifdef CONFIG_SCHED_BOOK
+ SD_INIT_FUNC(BOOK)
+#endif
 
 static int default_relax_domain_level = -1;
 
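NOTE: SD_INIT_FUNC(BOOK) generates sd_init_BOOK(), which fills a book-level struct sched_domain from an SD_BOOK_INIT initializer template. An architecture enabling CONFIG_SCHED_BOOK is expected to supply SD_BOOK_INIT and cpu_book_mask(); the s390 side of this series defines SD_BOOK_INIT as SD_CPU_INIT, and SD_LV_BOOK is added to enum sched_domain_level alongside it. For orientation, the generator macro defined earlier in sched.c expands roughly like this (simplified sketch; the real macro also records the domain name for CONFIG_SCHED_DEBUG output):

	#define SD_INIT_FUNC(type)					\
	static noinline void sd_init_##type(struct sched_domain *sd)	\
	{								\
		memset(sd, 0, sizeof(*sd));				\
		*sd = SD_##type##_INIT;					\
		sd->level = SD_LV_##type;				\
	}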
@@ -6888,6 +6921,8 @@ static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
 		free_cpumask_var(d->tmpmask); /* fall through */
 	case sa_send_covered:
 		free_cpumask_var(d->send_covered); /* fall through */
+	case sa_this_book_map:
+		free_cpumask_var(d->this_book_map); /* fall through */
 	case sa_this_core_map:
 		free_cpumask_var(d->this_core_map); /* fall through */
 	case sa_this_sibling_map:
@@ -6934,8 +6969,10 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
 		return sa_nodemask;
 	if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
 		return sa_this_sibling_map;
-	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+	if (!alloc_cpumask_var(&d->this_book_map, GFP_KERNEL))
 		return sa_this_core_map;
+	if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
+		return sa_this_book_map;
 	if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
 		return sa_send_covered;
 	d->rd = alloc_rootdomain();
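NOTE: The two hunks above keep three orderings mirrored: the fields in struct s_data, the values of enum s_alloc, and the case labels in __free_domain_allocs(). On an allocation failure, __visit_domain_allocation_hell() returns the name of the last stage that did succeed, and the teardown switch falls through from that case downward, freeing exactly the successful allocations. A standalone sketch of the pattern (hypothetical names, not kernel code):

	#include <stdio.h>
	#include <stdlib.h>

	/* ordered from "everything allocated" down to "nothing" */
	enum stage { st_all, st_c, st_b, st_a, st_none };

	struct res { void *a, *b, *c; };

	/* returns the last stage that succeeded */
	static enum stage acquire(struct res *r)
	{
		if (!(r->a = malloc(16)))
			return st_none;
		if (!(r->b = malloc(16)))
			return st_a;
		if (!(r->c = malloc(16)))
			return st_b;
		return st_all;
	}

	/* fall through from the reached stage, freeing in reverse order */
	static void unwind(struct res *r, enum stage reached)
	{
		switch (reached) {
		case st_all:
		case st_c:
			free(r->c);	/* fall through */
		case st_b:
			free(r->b);	/* fall through */
		case st_a:
			free(r->a);	/* fall through */
		case st_none:
			break;
		}
	}

	int main(void)
	{
		struct res r = { 0 };
		enum stage reached = acquire(&r);

		unwind(&r, reached);	/* frees only what acquire() got */
		return reached == st_all ? 0 : 1;
	}

Adding a stage, as this patch does with sa_this_book_map, therefore means one new struct field, one new enum value, one alloc line, and one fall-through case, all inserted at matching positions.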
@@ -6993,6 +7030,23 @@ static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
 	return sd;
 }
 
+static struct sched_domain *__build_book_sched_domain(struct s_data *d,
+	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
+	struct sched_domain *parent, int i)
+{
+	struct sched_domain *sd = parent;
+#ifdef CONFIG_SCHED_BOOK
+	sd = &per_cpu(book_domains, i).sd;
+	SD_INIT(sd, BOOK);
+	set_domain_attribute(sd, attr);
+	cpumask_and(sched_domain_span(sd), cpu_map, cpu_book_mask(i));
+	sd->parent = parent;
+	parent->child = sd;
+	cpu_to_book_group(i, cpu_map, &sd->groups, d->tmpmask);
+#endif
+	return sd;
+}
+
 static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
 	const struct cpumask *cpu_map, struct sched_domain_attr *attr,
 	struct sched_domain *parent, int i)
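NOTE: __build_book_sched_domain() splices the new level into each CPU's domain chain by pointing sd->parent at the CPU-level (physical) domain and parent->child back down at the book domain, exactly as the MC and SMT builders below do for their levels. With every option enabled, the chain assembled in __build_sched_domains() becomes:

	NUMA (allnodes / node)
	  CPU (physical package)
	    BOOK   <- added by this patch
	      MC (cores sharing a cache)
	        SMT (hardware threads)

A level whose CONFIG option is off drops out of the chain, since its builder then just returns the parent unchanged, as the #ifdef structure of the function above shows.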
@@ -7049,6 +7103,15 @@ static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
 						&cpu_to_core_group,
 						d->send_covered, d->tmpmask);
 		break;
+#endif
+#ifdef CONFIG_SCHED_BOOK
+	case SD_LV_BOOK: /* set up book groups */
+		cpumask_and(d->this_book_map, cpu_map, cpu_book_mask(cpu));
+		if (cpu == cpumask_first(d->this_book_map))
+			init_sched_build_groups(d->this_book_map, cpu_map,
+						&cpu_to_book_group,
+						d->send_covered, d->tmpmask);
+		break;
 #endif
 	case SD_LV_CPU: /* set up physical groups */
 		cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
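NOTE: In the SD_LV_BOOK case above, d->this_book_map is the set of online CPUs sharing cpu's book. The cpumask_first() test lets only the first CPU of each book call init_sched_build_groups(), so each book's circular group list is built exactly once even though build_sched_groups() runs for every CPU in cpu_map; the same guard pattern already protects the SIBLING and MC cases.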
@@ -7097,12 +7160,14 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
 		sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
 		sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
+		sd = __build_book_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
 		sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
 	}
 
 	for_each_cpu(i, cpu_map) {
 		build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
+		build_sched_groups(&d, SD_LV_BOOK, cpu_map, i);
 		build_sched_groups(&d, SD_LV_MC, cpu_map, i);
 	}
 
@@ -7133,6 +7198,12 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		init_sched_groups_power(i, sd);
 	}
 #endif
+#ifdef CONFIG_SCHED_BOOK
+	for_each_cpu(i, cpu_map) {
+		sd = &per_cpu(book_domains, i).sd;
+		init_sched_groups_power(i, sd);
+	}
+#endif
 
 	for_each_cpu(i, cpu_map) {
 		sd = &per_cpu(phys_domains, i).sd;
@@ -7158,6 +7229,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 		sd = &per_cpu(cpu_domains, i).sd;
 #elif defined(CONFIG_SCHED_MC)
 		sd = &per_cpu(core_domains, i).sd;
+#elif defined(CONFIG_SCHED_BOOK)
+		sd = &per_cpu(book_domains, i).sd;
 #else
 		sd = &per_cpu(phys_domains, i).sd;
 #endif
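NOTE: This last hunk extends the #ifdef cascade that chooses which domain gets handed to cpu_attach_domain(): each CPU is attached at its lowest existing level, so SMT (cpu_domains) wins when configured, then MC, now BOOK, and otherwise the physical domain. A configuration with CONFIG_SCHED_BOOK but neither CONFIG_SCHED_SMT nor CONFIG_SCHED_MC therefore attaches CPUs at the book level.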