@@ -8866,11 +8866,29 @@ static int tg_schedulable(struct task_group *tg, void *data)
 		runtime = d->rt_runtime;
 	}
 
+	/*
+	 * Cannot have more runtime than the period.
+	 */
+	if (runtime > period && runtime != RUNTIME_INF)
+		return -EINVAL;
+
+	/*
+	 * Ensure we don't starve existing RT tasks.
+	 */
 	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
 		return -EBUSY;
 
 	total = to_ratio(period, runtime);
 
+	/*
+	 * Nobody can have more than the global setting allows.
+	 */
+	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
+		return -EINVAL;
+
+	/*
+	 * The sum of our children's runtime should not exceed our own.
+	 */
 	list_for_each_entry_rcu(child, &tg->children, siblings) {
 		period = ktime_to_ns(child->rt_bandwidth.rt_period);
 		runtime = child->rt_bandwidth.rt_runtime;
@@ -8978,19 +8996,24 @@ long sched_group_rt_period(struct task_group *tg)
 
 static int sched_rt_global_constraints(void)
 {
-	struct task_group *tg = &root_task_group;
-	u64 rt_runtime, rt_period;
+	u64 runtime, period;
 	int ret = 0;
 
 	if (sysctl_sched_rt_period <= 0)
 		return -EINVAL;
 
-	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
-	rt_runtime = tg->rt_bandwidth.rt_runtime;
+	runtime = global_rt_runtime();
+	period = global_rt_period();
+
+	/*
+	 * Sanity check on the sysctl variables.
+	 */
+	if (runtime > period && runtime != RUNTIME_INF)
+		return -EINVAL;
 
 	mutex_lock(&rt_constraints_mutex);
 	read_lock(&tasklist_lock);
-	ret = __rt_schedulable(tg, rt_period, rt_runtime);
+	ret = __rt_schedulable(NULL, 0, 0);
 	read_unlock(&tasklist_lock);
 	mutex_unlock(&rt_constraints_mutex);
 
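For readers following the new checks outside the kernel tree: both hunks compare bandwidths as fixed-point runtime/period ratios via to_ratio(). The stand-alone sketch below mimics that comparison in plain C. The shift width, the RUNTIME_INF stand-in, and the example numbers are illustrative assumptions, not the kernel's exact definitions, which live elsewhere in kernel/sched.c.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; not the kernel's actual constants. */
#define RUNTIME_INF	((uint64_t)~0ULL)
#define RATIO_SHIFT	16	/* assumed fixed-point precision */

/* Express runtime/period as a fixed-point fraction, in the spirit of to_ratio(). */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << RATIO_SHIFT;	/* treated as "the whole period" */

	return (runtime << RATIO_SHIFT) / period;
}

int main(void)
{
	/* Hypothetical global limit: 950ms runtime per 1s period. */
	uint64_t global_period  = 1000000000ULL;
	uint64_t global_runtime =  950000000ULL;

	/* Hypothetical group asking for 400ms runtime per 500ms period. */
	uint64_t period  = 500000000ULL;
	uint64_t runtime = 400000000ULL;

	/* First check from the patch: runtime may not exceed the period. */
	if (runtime > period && runtime != RUNTIME_INF)
		return 1;

	/* Second check: the group's ratio may not exceed the global ratio. */
	if (to_ratio(period, runtime) > to_ratio(global_period, global_runtime))
		return 1;

	printf("group ratio %llu <= global ratio %llu: admissible\n",
	       (unsigned long long)to_ratio(period, runtime),
	       (unsigned long long)to_ratio(global_period, global_runtime));
	return 0;
}

With these example numbers the group uses 0.8 of its period against a global allowance of 0.95, so both checks pass; raising runtime above period, or the ratio above the global one, makes the corresponding check fail just as the patched code returns -EINVAL.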