@@ -517,6 +517,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
 /*
  * Helper routine for generate_sched_domains().
  * Do cpusets a, b have overlapping cpus_allowed masks?
@@ -811,6 +812,18 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
 
 	put_online_cpus();
 }
+#else /* !CONFIG_SMP */
+static void do_rebuild_sched_domains(struct work_struct *unused)
+{
+}
+
+static int generate_sched_domains(struct cpumask **domains,
+			struct sched_domain_attr **attributes)
+{
+	*domains = NULL;
+	return 1;
+}
+#endif /* CONFIG_SMP */
 
 static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
 
@@ -1164,8 +1177,10 @@ int current_cpuset_is_being_rebound(void)
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
+#ifdef CONFIG_SMP
 	if (val < -1 || val >= SD_LV_MAX)
 		return -EINVAL;
+#endif
 
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;
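
For illustration (not part of the patch above): the new !CONFIG_SMP branch follows the usual compile-out-stub idiom, keeping the same function signatures so that callers such as DECLARE_WORK(rebuild_sched_domains_work, ...) build without any #ifdefs of their own. The stand-alone program below sketches the same idiom with invented names (WITH_SMP, generate_domains); it is an analogy under those assumptions, not kernel code.

/*
 * Stand-alone sketch of the compile-out-stub idiom used by the
 * !CONFIG_SMP branch in the patch above.  All names here (WITH_SMP,
 * generate_domains) are invented for the example.
 */
#include <stdio.h>
#include <stdlib.h>

#ifdef WITH_SMP
/* "Real" version: builds a one-element partition. */
static int generate_domains(int **domains)
{
	*domains = malloc(sizeof(**domains));
	if (!*domains)
		return 0;
	(*domains)[0] = 42;
	return 1;
}
#else /* !WITH_SMP */
/* Stub with the same signature: reports one "domain", nothing behind it. */
static int generate_domains(int **domains)
{
	*domains = NULL;
	return 1;
}
#endif /* WITH_SMP */

int main(void)
{
	int *doms;
	int ndoms = generate_domains(&doms);

	/* The caller is identical whether or not WITH_SMP is defined. */
	printf("ndoms=%d doms=%p\n", ndoms, (void *)doms);
	free(doms);		/* free(NULL) is a no-op */
	return 0;
}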