@@ -124,21 +124,33 @@ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 	update_rt_migration(rt_rq);
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 	plist_node_init(&p->pushable_tasks, p->prio);
 	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
+
+	/* Update the highest prio pushable task */
+	if (p->prio < rq->rt.highest_prio.next)
+		rq->rt.highest_prio.next = p->prio;
 }
 
 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
-}
 
-static inline int has_pushable_tasks(struct rq *rq)
-{
-	return !plist_head_empty(&rq->rt.pushable_tasks);
+	/* Update the new highest prio pushable task */
+	if (has_pushable_tasks(rq)) {
+		p = plist_first_entry(&rq->rt.pushable_tasks,
+				      struct task_struct, pushable_tasks);
+		rq->rt.highest_prio.next = p->prio;
+	} else
+		rq->rt.highest_prio.next = MAX_RT_PRIO;
 }
 
 #else
@@ -643,6 +655,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 
 	if (rt_rq->rt_time > runtime) {
 		rt_rq->rt_throttled = 1;
+		printk_once(KERN_WARNING "sched: RT throttling activated\n");
 		if (rt_rq_throttled(rt_rq)) {
 			sched_rt_rq_dequeue(rt_rq);
 			return 1;
@@ -698,47 +711,13 @@ static void update_curr_rt(struct rq *rq)
 
 #if defined CONFIG_SMP
 
-static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
-
-static inline int next_prio(struct rq *rq)
-{
-	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
-
-	if (next && rt_prio(next->prio))
-		return next->prio;
-	else
-		return MAX_RT_PRIO;
-}
-
 static void
 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
-	if (prio < prev_prio) {
-
-		/*
-		 * If the new task is higher in priority than anything on the
-		 * run-queue, we know that the previous high becomes our
-		 * next-highest.
-		 */
-		rt_rq->highest_prio.next = prev_prio;
-
-		if (rq->online)
-			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
-
-	} else if (prio == rt_rq->highest_prio.curr)
-		/*
-		 * If the next task is equal in priority to the highest on
-		 * the run-queue, then we implicitly know that the next highest
-		 * task cannot be any lower than current
-		 */
-		rt_rq->highest_prio.next = prio;
-	else if (prio < rt_rq->highest_prio.next)
-		/*
-		 * Otherwise, we need to recompute next-highest
-		 */
-		rt_rq->highest_prio.next = next_prio(rq);
+	if (rq->online && prio < prev_prio)
+		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
 }
 
 static void
@@ -746,9 +725,6 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
 {
 	struct rq *rq = rq_of_rt_rq(rt_rq);
 
-	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
-		rt_rq->highest_prio.next = next_prio(rq);
-
 	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
 		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
 }
@@ -961,6 +937,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
+
+	inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -971,6 +949,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
+
+	dec_nr_running(rq);
 }
 
 /*
@@ -1017,10 +997,12 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	struct rq *rq;
 	int cpu;
 
-	if (sd_flag != SD_BALANCE_WAKE)
-		return smp_processor_id();
-
 	cpu = task_cpu(p);
+
+	/* For anything but wake ups, just return the task_cpu */
+	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
+		goto out;
+
 	rq = cpu_rq(cpu);
 
 	rcu_read_lock();
@@ -1059,6 +1041,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	}
 	rcu_read_unlock();
 
+out:
 	return cpu;
 }
 
@@ -1178,7 +1161,6 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
-	p->se.exec_start = 0;
 
 	/*
 	 * The previous task needs to be made eligible for pushing
@@ -1198,7 +1180,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -1343,7 +1325,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			 */
 			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
-						       &task->cpus_allowed) ||
+						       tsk_cpus_allowed(task)) ||
				     task_running(rq, task) ||
				     !task->on_rq)) {
 
@@ -1394,6 +1376,7 @@ static int push_rt_task(struct rq *rq)
 {
 	struct task_struct *next_task;
 	struct rq *lowest_rq;
+	int ret = 0;
 
 	if (!rq->rt.overloaded)
 		return 0;
@@ -1426,7 +1409,7 @@ retry:
 	if (!lowest_rq) {
 		struct task_struct *task;
 		/*
-		 * find lock_lowest_rq releases rq->lock
+		 * find_lock_lowest_rq releases rq->lock
 		 * so it is possible that next_task has migrated.
 		 *
 		 * We need to make sure that the task is still on the same
@@ -1436,12 +1419,11 @@ retry:
 		task = pick_next_pushable_task(rq);
 		if (task_cpu(next_task) == rq->cpu && task == next_task) {
 			/*
-			 * If we get here, the task hasn't moved at all, but
-			 * it has failed to push. We will not try again,
-			 * since the other cpus will pull from us when they
-			 * are ready.
+			 * The task hasn't migrated, and is still the next
+			 * eligible task, but we failed to find a run-queue
+			 * to push it to. Do not retry in this case, since
+			 * other cpus will pull from us when ready.
 			 */
-			dequeue_pushable_task(rq, next_task);
 			goto out;
 		}
 
@@ -1460,6 +1442,7 @@ retry:
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
+	ret = 1;
 
 	resched_task(lowest_rq->curr);
 
@@ -1468,7 +1451,7 @@ retry:
 out:
 	put_task_struct(next_task);
 
-	return 1;
+	return ret;
 }
 
 static void push_rt_tasks(struct rq *rq)
@@ -1626,9 +1609,6 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 
 		update_rt_migration(&rq->rt);
 	}
-
-	cpumask_copy(&p->cpus_allowed, new_mask);
-	p->rt.nr_cpus_allowed = weight;
 }
 
 /* Assumes rq->lock is held */
@@ -1863,4 +1843,3 @@ static void print_rt_stats(struct seq_file *m, int cpu)
 	rcu_read_unlock();
 }
 #endif /* CONFIG_SCHED_DEBUG */
-