@@ -882,7 +882,8 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 }
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-#ifdef CONFIG_SMP
+/* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
 /*
  * We choose a half-life close to 1 scheduling period.
  * Note: The tables below are dependent on this value.
@@ -3173,6 +3174,12 @@ unlock:
 	return new_cpu;
 }
 
+/*
+ * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
+ * removed when useful for applications beyond shares distribution (e.g.
+ * load-balance).
+ */
+#ifdef CONFIG_FAIR_GROUP_SCHED
 /*
  * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
  * cfs_rq_of(p) references at time of call are still valid and identify the
@@ -3196,6 +3203,7 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 		atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
 	}
 }
+#endif
 #endif /* CONFIG_SMP */
 
 static unsigned long
@@ -5773,8 +5781,9 @@ const struct sched_class fair_sched_class = {
 
 #ifdef CONFIG_SMP
 	.select_task_rq		= select_task_rq_fair,
+#ifdef CONFIG_FAIR_GROUP_SCHED
 	.migrate_task_rq	= migrate_task_rq_fair,
-
+#endif
 	.rq_online		= rq_online_fair,
 	.rq_offline		= rq_offline_fair,
 
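
A note for readers tracing the guard structure: the pattern is to compile the
load-tracking code only under the combined guard, and to register the
.migrate_task_rq hook under a matching nested guard in the sched_class
initializer, so the initializer never names a symbol that was not built. A
minimal standalone sketch of that pattern follows; it is illustrative only.
The names are stand-ins, the real struct sched_class in kernel/sched/sched.h
has many more members, and in a real build CONFIG_SMP and
CONFIG_FAIR_GROUP_SCHED come from Kconfig rather than compiler flags.

struct task_struct;		/* opaque stand-in for the kernel's type */

#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
/*
 * The load-tracking region: compiled only when both options are set,
 * matching the temporary dependency introduced by this patch.
 */
static void migrate_task_rq_fair(struct task_struct *p, int next_cpu)
{
	(void)p;		/* ...record the departing task's load here... */
	(void)next_cpu;
}
#endif /* CONFIG_SMP && CONFIG_FAIR_GROUP_SCHED */

struct sched_class_sketch {
	const char *name;
#ifdef CONFIG_SMP
	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
#endif
};

const struct sched_class_sketch fair_sched_class_sketch = {
	.name = "fair_sketch",
#ifdef CONFIG_SMP
#ifdef CONFIG_FAIR_GROUP_SCHED
	/* The hook is wired up only when the callback was actually built. */
	.migrate_task_rq = migrate_task_rq_fair,
#endif
#endif
};

Compiling the sketch with and without -DCONFIG_FAIR_GROUP_SCHED shows the
hook appearing and disappearing together with its definition, which is why
the +#ifdef/+#endif pair around .migrate_task_rq in the last hunk is needed.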
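
The atomic64_add() into cfs_rq->removed_load in the middle hunk also deserves
a note: a task migrating away cannot take the old runqueue's lock, so its
load contribution is recorded locklessly and only folded back in the next
time the old runqueue updates itself under its own lock. Below is a rough
user-space illustration of that record/drain shape, using C11 atomics in
place of the kernel's atomic64_t; all names are made up for the sketch, and
it assumes the drained amount is subtracted from the old queue's blocked
load, as in the per-entity load-tracking series.

#include <stdatomic.h>
#include <stdio.h>

struct toy_cfs_rq {
	long blocked_load_avg;		/* updated only under the rq lock */
	_Atomic long removed_load;	/* updated locklessly at migration */
};

/* Migration path: no lock on the old runqueue is held. */
static void record_removed_load(struct toy_cfs_rq *cfs_rq, long load_contrib)
{
	atomic_fetch_add_explicit(&cfs_rq->removed_load, load_contrib,
				  memory_order_relaxed);
}

/* Later, with the old runqueue's lock held: drain and subtract. */
static void sync_removed_load(struct toy_cfs_rq *cfs_rq)
{
	long removed = atomic_exchange_explicit(&cfs_rq->removed_load, 0,
						memory_order_relaxed);
	cfs_rq->blocked_load_avg -= removed;
	if (cfs_rq->blocked_load_avg < 0)
		cfs_rq->blocked_load_avg = 0;
}

int main(void)
{
	struct toy_cfs_rq rq = { .blocked_load_avg = 2048 };

	record_removed_load(&rq, 512);	/* a task migrates away */
	sync_removed_load(&rq);		/* the old rq catches up */
	printf("blocked_load_avg = %ld\n", rq.blocked_load_avg); /* 1536 */
	return 0;
}

The exchange plays the role the kernel's atomic64_xchg() plays here: it
drains the accumulator exactly once even if several tasks migrated away
since the last update.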