@@ -778,12 +778,9 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
-	int i, idle = 1;
+	int i, idle = 1, throttled = 0;
 	const struct cpumask *span;
 
-	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
 	span = sched_rt_period_mask();
 	for_each_cpu(i, span) {
 		int enqueue = 0;
@@ -818,12 +815,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (!rt_rq_throttled(rt_rq))
 				enqueue = 1;
 		}
+		if (rt_rq->rt_throttled)
+			throttled = 1;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
 		raw_spin_unlock(&rq->lock);
 	}
 
+	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
+		return 1;
+
 	return idle;
 }
 
@@ -884,7 +886,8 @@ static void update_curr_rt(struct rq *rq)
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
-	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
+	schedstat_set(curr->se.statistics.exec_max,
+		      max(curr->se.statistics.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
 	account_group_exec_runtime(curr, delta_exec);
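
For readers following the first two hunks, below is a minimal standalone sketch of the resulting control flow. It is not kernel code: `do_period_timer`, the simplified `struct rt_rq`, and the `RUNTIME_INF` stand-in are invented here for illustration. What it demonstrates is the reordering the patch makes: the early `return 1` for disabled or infinite bandwidth now fires only when no runqueue is still throttled, so the period timer keeps ticking, and keeps replenishing runtime, while any rt_rq remains throttled.

#include <stdio.h>

#define RUNTIME_INF	(-1L)	/* simplified stand-in for the kernel constant */

struct rt_rq {
	int rt_throttled;	/* nonzero while this runqueue is throttled */
};

/*
 * Returns 1 when the period timer may go idle, 0 when it must keep
 * running. The per-runqueue replenish/unthrottle work done by the real
 * function is elided.
 */
static int do_period_timer(struct rt_rq *rqs, int nr,
			   int bandwidth_enabled, long rt_runtime)
{
	int i, idle = 1, throttled = 0;

	for (i = 0; i < nr; i++) {
		/* ... replenish runtime, maybe unthrottle (elided) ... */
		if (rqs[i].rt_throttled) {
			throttled = 1;	/* remember: someone is still throttled */
			idle = 0;
		}
	}

	/*
	 * The old code performed this check before the loop, so a
	 * disabled/infinite bandwidth setting stopped the timer even
	 * while runqueues were still throttled, leaving them stuck.
	 */
	if (!throttled && (!bandwidth_enabled || rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

int main(void)
{
	struct rt_rq rqs[2] = { { .rt_throttled = 1 }, { .rt_throttled = 0 } };

	/* a throttled runqueue keeps the timer alive: prints 0 */
	printf("%d\n", do_period_timer(rqs, 2, 0, RUNTIME_INF));
	return 0;
}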