@@ -27,6 +27,11 @@ static inline void autogroup_destroy(struct kref *kref)
 {
         struct autogroup *ag = container_of(kref, struct autogroup, kref);

+#ifdef CONFIG_RT_GROUP_SCHED
+        /* We've redirected RT tasks to the root task group... */
+        ag->tg->rt_se = NULL;
+        ag->tg->rt_rq = NULL;
+#endif
         sched_destroy_group(ag->tg);
 }

@@ -55,6 +60,10 @@ static inline struct autogroup *autogroup_task_get(struct task_struct *p)
         return ag;
 }

+#ifdef CONFIG_RT_GROUP_SCHED
+static void free_rt_sched_group(struct task_group *tg);
+#endif
+
 static inline struct autogroup *autogroup_create(void)
 {
         struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
@@ -72,6 +81,19 @@ static inline struct autogroup *autogroup_create(void)
         init_rwsem(&ag->lock);
         ag->id = atomic_inc_return(&autogroup_seq_nr);
         ag->tg = tg;
+#ifdef CONFIG_RT_GROUP_SCHED
+        /*
+         * Autogroup RT tasks are redirected to the root task group
+         * so we don't have to move tasks around upon policy change,
+         * or flail around trying to allocate bandwidth on the fly.
+         * A bandwidth exception in __sched_setscheduler() allows
+         * the policy change to proceed. Thereafter, task_group()
+         * returns &root_task_group, so zero bandwidth is required.
+         */
+        free_rt_sched_group(tg);
+        tg->rt_se = root_task_group.rt_se;
+        tg->rt_rq = root_task_group.rt_rq;
+#endif
         tg->autogroup = ag;

         return ag;
@@ -106,6 +128,11 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
         return true;
 }

+static inline bool task_group_is_autogroup(struct task_group *tg)
+{
+        return tg != &root_task_group && tg->autogroup;
+}
+
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
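
For reference, the "bandwidth exception" that the autogroup_create() comment points at is the CONFIG_RT_GROUP_SCHED check in __sched_setscheduler() which normally refuses an RT policy change while the task's group has zero rt_runtime. Below is a minimal sketch, not part of the hunks above, of how that exception can be expressed with the task_group_is_autogroup() helper added by this patch; the exact surrounding context in __sched_setscheduler() is assumed, not quoted:

#ifdef CONFIG_RT_GROUP_SCHED
        /*
         * Do not allow realtime tasks into groups that have no runtime
         * assigned, but let autogroup tasks through: their RT scheduling
         * entities live in the root task group, so zero group bandwidth
         * is expected and harmless.
         */
        if (rt_bandwidth_enabled() && rt_policy(policy) &&
                        task_group(p)->rt_bandwidth.rt_runtime == 0 &&
                        !task_group_is_autogroup(task_group(p)))
                return -EPERM;
#endif

With an exception of this shape in place, a task in an autogroup can switch to an RT policy without the autogroup ever being granted RT runtime of its own, since its RT entities are accounted against the root task group.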