@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 	struct sched_rt_entity *rt_se;
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
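
sched_rt_rq_enqueue() is not necessarily called on the CPU that owns the
rt_rq: the bandwidth period timer (see the last hunk below) iterates over
every CPU's rt_rq from whichever CPU the timer fires on. Indexing the task
group's per-CPU rt_se[] array with smp_processor_id() therefore resolves to
the wrong scheduling entity whenever the calling CPU and the run-queue's CPU
differ; cpu_of(rq_of_rt_rq(rt_rq)) derives the index from the run-queue
itself. Below is a minimal userspace sketch of just the indexing mistake,
using hypothetical stand-in types (fake_rq, fake_group, fake_cpu_of) rather
than the real kernel structures:

/*
 * Standalone sketch (not kernel code) of the lookup bug fixed above.
 * All names here are illustrative; only the indexing pattern mirrors
 * the patch.
 */
#include <stdio.h>

#define NR_CPUS 4

struct fake_rq {
	int cpu;			/* the CPU this run-queue belongs to */
};

struct fake_group {
	const char *rt_se[NR_CPUS];	/* one scheduling entity per CPU */
};

/* Mirrors cpu_of(rq_of_rt_rq(rt_rq)): derive the CPU from the rq itself. */
static int fake_cpu_of(const struct fake_rq *rq)
{
	return rq->cpu;
}

int main(void)
{
	struct fake_group tg = { { "se[0]", "se[1]", "se[2]", "se[3]" } };
	struct fake_rq rq2 = { .cpu = 2 };	/* rt_rq owned by CPU 2 */
	int this_cpu = 0;			/* caller happens to run on CPU 0 */

	/* Buggy: index by the CPU executing the code. */
	printf("buggy lookup: %s\n", tg.rt_se[this_cpu]);	   /* se[0], wrong */

	/* Fixed: index by the CPU that owns the run-queue. */
	printf("fixed lookup: %s\n", tg.rt_se[fake_cpu_of(&rq2)]); /* se[2] */
	return 0;
}
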
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct sched_rt_entity *rt_se;
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
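
sched_rt_rq_dequeue() has the identical lookup bug: indexed by
smp_processor_id(), a cross-CPU caller would test and dequeue the local
CPU's group entity instead of the one belonging to the rt_rq actually being
operated on. The fix is the same rq-derived index as in the enqueue path
sketched above.
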
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
-		} else if (rt_rq->rt_nr_running)
+		} else if (rt_rq->rt_nr_running) {
 			idle = 0;
+			if (!rt_rq_throttled(rt_rq))
+				enqueue = 1;
+		}
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
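
This last hunk changes do_sched_rt_period_timer(): when a group's rt_rq
consumed no runtime over the period (rt_rq->rt_time == 0) but still has
tasks queued, the old code only cleared idle and never set enqueue, so a
previously dequeued group entity was never put back on the run-queue and
its queued tasks could sit unscheduled. With the fix, any non-throttled
rt_rq with runnable tasks gets enqueue = 1, and the sched_rt_rq_enqueue()
call just below the branch re-enqueues the entity. A standalone sketch of
that decision, with simplified stand-in fields (rq_state, rt_throttled) in
place of the kernel's rt_rq state:

/*
 * Hedged sketch of the period-timer decision above; not kernel code,
 * and the unthrottle path taken when runtime was consumed is elided.
 */
#include <stdbool.h>
#include <stdio.h>

struct rq_state {
	unsigned long rt_time;		/* runtime consumed this period */
	unsigned int rt_nr_running;	/* RT tasks queued on this rt_rq */
	bool rt_throttled;		/* over its bandwidth budget? */
};

/* Returns true when the group entity should be (re-)enqueued. */
static bool should_enqueue(const struct rq_state *rt_rq)
{
	bool enqueue = false;

	if (rt_rq->rt_time) {
		/* Runtime was consumed: the real code refills the budget
		 * and may unthrottle here (elided in this sketch). */
	} else if (rt_rq->rt_nr_running) {
		/* No runtime consumed but tasks are queued. Before the
		 * fix this branch never set enqueue, so a dequeued group
		 * entity stayed off the run-queue. */
		if (!rt_rq->rt_throttled)
			enqueue = true;
	}
	return enqueue;
}

int main(void)
{
	struct rq_state quiet_group = {
		.rt_time = 0, .rt_nr_running = 2, .rt_throttled = false,
	};

	printf("enqueue: %d\n", should_enqueue(&quiet_group));	/* prints 1 */
	return 0;
}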