|
@@ -7750,6 +7750,17 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
|
|
|
return total + to_ratio(period, runtime) < global_ratio;
|
|
|
}
|
|
|
|
|
|
+/* Return 1 iff some thread is an RT task whose rt_rq belongs to @tg, else 0; caller must hold tasklist_lock (we walk the global thread list). */
|


|
+static inline int tg_has_rt_tasks(struct task_group *tg)
|


|
+{
|


|
+	struct task_struct *g, *p;
|


|
+	do_each_thread(g, p) {
|


|
+		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
|


|
+			return 1;
|


|
+	} while_each_thread(g, p);
|


|
+	return 0;
|


|
+}
|
|
|
+
|
|
|
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
|
|
|
{
|
|
|
u64 rt_runtime, rt_period;
|
|
@@ -7761,12 +7772,18 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
|
|
|
rt_runtime = RUNTIME_INF;
|
|
|
|
|
|
mutex_lock(&rt_constraints_mutex);
|
|
|
+ read_lock(&tasklist_lock);
|
|
|
+ if (rt_runtime_us == 0 && tg_has_rt_tasks(tg)) {
|
|
|
+ err = -EBUSY;
|
|
|
+ goto unlock;
|
|
|
+ }
|
|
|
if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
|
|
|
err = -EINVAL;
|
|
|
goto unlock;
|
|
|
}
|
|
|
tg->rt_runtime = rt_runtime;
|
|
|
unlock:
|
|
|
+ read_unlock(&tasklist_lock);
|
|
|
mutex_unlock(&rt_constraints_mutex);
|
|
|
|
|
|
return err;
|