@@ -440,9 +440,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
 
-	if (runtime == RUNTIME_INF)
-		return 0;
-
 	if (rt_rq->rt_throttled)
 		return rt_rq_throttled(rt_rq);
 
@@ -493,9 +490,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
-		if (sched_rt_runtime_exceeded(rt_rq))
-			resched_task(curr);
+		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			rt_rq->rt_time += delta_exec;
+			if (sched_rt_runtime_exceeded(rt_rq))
+				resched_task(curr);
+		}
 		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }