@@ -6552,31 +6552,23 @@ cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
-#endif /* CONFIG_SCHED_MC */
 
-#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
 static int
 cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
 		  struct sched_group **sg, struct cpumask *mask)
 {
 	int group;
-
+#ifdef CONFIG_SCHED_SMT
 	cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
 	group = cpumask_first(mask);
+#else
+	group = cpu;
+#endif
 	if (sg)
 		*sg = &per_cpu(sched_group_core, group).sg;
 	return group;
 }
-#elif defined(CONFIG_SCHED_MC)
-static int
-cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
-		  struct sched_group **sg, struct cpumask *unused)
-{
-	if (sg)
-		*sg = &per_cpu(sched_group_core, cpu).sg;
-	return cpu;
-}
-#endif
+#endif /* CONFIG_SCHED_MC */
 
 static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);