@@ -2195,13 +2195,15 @@ static int migration_cpu_stop(void *data);
  * The task's runqueue lock must be held.
  * Returns true if you have to wait for migration thread.
  */
-static bool migrate_task(struct task_struct *p, struct rq *rq)
+static bool need_migrate_task(struct task_struct *p)
 {
         /*
          * If the task is not on a runqueue (and not running), then
          * the next wake-up will properly place the task.
          */
-        return p->on_rq || task_running(rq, p);
+        bool running = p->on_rq || p->on_cpu;
+
+        smp_rmb(); /* finish_lock_switch() */
+        return running;
 }
 
 /*
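A note on the ordering inside need_migrate_task(): it reads p->on_rq and p->on_cpu, and the smp_rmb() is annotated as pairing with finish_lock_switch(), where the previous CPU publishes that it has finished switching the task out before p->on_cpu is cleared. The stand-alone sketch below shows the same "publish with a write barrier, observe with a read barrier" pattern in portable C11 atomics; every name in it is invented for illustration and none of it is kernel code.

/*
 * Illustrative only: write-side publish vs. read-side observe, written with
 * C11 fences and pthreads. None of the names below exist in the kernel.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static int switch_state;                 /* stores made while the task is still "on cpu" */
static atomic_bool still_on_cpu = true;  /* stands in for p->on_cpu */

static void *switch_out(void *arg)       /* plays the finish_lock_switch() role */
{
        switch_state = 42;                               /* work done before publishing */
        atomic_thread_fence(memory_order_release);       /* like the write-side barrier */
        atomic_store_explicit(&still_on_cpu, false, memory_order_relaxed);
        return arg;
}

static void *observe(void *arg)          /* plays the need_migrate_task() reader role */
{
        while (atomic_load_explicit(&still_on_cpu, memory_order_relaxed))
                ;                                        /* spin until the flag is cleared */
        atomic_thread_fence(memory_order_acquire);       /* like the smp_rmb() above */
        printf("seen after on_cpu cleared: %d\n", switch_state);  /* guaranteed to print 42 */
        return arg;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, switch_out, NULL);
        pthread_create(&b, NULL, observe, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

If the reading side observes the flag cleared, the paired fences guarantee it also sees every store the writing side made before publishing, which is the kind of ordering the smp_rmb() in the hunk above is there to provide.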
@@ -2376,9 +2378,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
  * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
  */
 static inline
-int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
+int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 {
-        int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
+        int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
 
         /*
          * In order not to call set_task_cpu() on a blocking task we need
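This hunk drops struct rq * from both the select_task_rq() wrapper and the sched_class::select_task_rq() hook it dispatches through, so CPU selection only needs the task plus the balance/wake flags; the remaining hunks below update the callers the same way. As a rough, self-contained model of that dispatch shape (all of the types and helpers here are invented for illustration, not the kernel's):

/*
 * Toy model of per-class CPU selection with the rq argument gone, roughly
 * the shape left by this hunk. All names (toy_class, toy_task, NR_TOY_CPUS,
 * pick_prev_cpu, ...) are invented for illustration and are not kernel code.
 */
#include <stdio.h>

#define NR_TOY_CPUS 4

struct toy_task;

struct toy_class {
        int (*select_task_rq)(struct toy_task *p, int sd_flags, int wake_flags);
};

struct toy_task {
        const struct toy_class *sched_class;
        int cpu;                    /* CPU the task last ran on */
        unsigned int cpus_allowed;  /* bitmask of permitted CPUs */
};

/* A trivial "class" policy: stay where the task last ran. */
static int pick_prev_cpu(struct toy_task *p, int sd_flags, int wake_flags)
{
        (void)sd_flags;
        (void)wake_flags;
        return p->cpu;
}

static const struct toy_class toy_fair_class = { .select_task_rq = pick_prev_cpu };

/* Ask the class for a CPU, then fall back if the answer is not allowed. */
static int toy_select_task_rq(struct toy_task *p, int sd_flags, int wake_flags)
{
        int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);

        if (!(p->cpus_allowed & (1u << cpu))) {
                /* crude stand-in for a select_fallback_rq()-style search */
                for (cpu = 0; cpu < NR_TOY_CPUS; cpu++)
                        if (p->cpus_allowed & (1u << cpu))
                                break;
        }
        return cpu;
}

int main(void)
{
        struct toy_task p = { .sched_class = &toy_fair_class, .cpu = 3, .cpus_allowed = 0x3 };

        /* CPU 3 is not allowed (mask 0x3 = CPUs 0 and 1), so this prints 0. */
        printf("selected cpu: %d\n", toy_select_task_rq(&p, 0, 0));
        return 0;
}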
@@ -2533,7 +2535,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
                 en_flags |= ENQUEUE_WAKING;
         }
 
-        cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
+        cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
         if (cpu != orig_cpu)
                 set_task_cpu(p, cpu);
         __task_rq_unlock(rq);
@@ -2744,7 +2746,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
          * We set TASK_WAKING so that select_task_rq() can drop rq->lock
          * without people poking at ->cpus_allowed.
          */
-        cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
+        cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
         set_task_cpu(p, cpu);
 
         p->state = TASK_RUNNING;
@@ -3474,7 +3476,7 @@ void sched_exec(void)
         int dest_cpu;
 
         rq = task_rq_lock(p, &flags);
-        dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+        dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
         if (dest_cpu == smp_processor_id())
                 goto unlock;
 
@@ -3482,7 +3484,7 @@ void sched_exec(void)
          * select_task_rq() can race against ->cpus_allowed
          */
         if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-            likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
+            likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
                 struct migration_arg arg = { p, dest_cpu };
 
                 task_rq_unlock(rq, &flags);
@@ -5911,7 +5913,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
                 goto out;
 
         dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-        if (migrate_task(p, rq)) {
+        if (need_migrate_task(p)) {
                 struct migration_arg arg = { p, dest_cpu };
                 /* Need help from migration thread: drop lock and wait. */
                 __task_rq_unlock(rq);
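Both of the call sites converted above, sched_exec() and set_cpus_allowed_ptr(), use need_migrate_task() the same way: a task that is queued or currently running has to be moved by the migration thread, while a blocked task needs nothing more because the next wake-up will place it, exactly as the comment in the first hunk says. A toy sketch of that decision, with every name below invented for illustration:

/*
 * Toy sketch of how the call sites above use need_migrate_task(): a queued
 * or running task must be moved by the migration thread, a blocked task is
 * left for the next wake-up to place. All names here are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_task {
        bool on_rq;   /* queued on a runqueue */
        bool on_cpu;  /* currently executing */
};

static bool toy_need_migrate_task(const struct toy_task *p)
{
        return p->on_rq || p->on_cpu;
}

/* stand-in for handing a { task, dest_cpu } pair to the migration thread */
static void stopper_migrate(const struct toy_task *p, int dest_cpu)
{
        (void)p;
        printf("migration thread moves the task to CPU %d\n", dest_cpu);
}

static void toy_change_affinity(const struct toy_task *p, int dest_cpu)
{
        if (toy_need_migrate_task(p))
                stopper_migrate(p, dest_cpu);   /* drop the lock and wait in the real code */
        else
                printf("task is blocked: the next wake-up will place it on an allowed CPU\n");
}

int main(void)
{
        struct toy_task running = { .on_rq = true,  .on_cpu = true  };
        struct toy_task blocked = { .on_rq = false, .on_cpu = false };

        toy_change_affinity(&running, 2);
        toy_change_affinity(&blocked, 2);
        return 0;
}

In the real code the migration-thread branch builds the struct migration_arg seen in the hunks above and then drops the runqueue lock and waits, per the "Need help from migration thread: drop lock and wait." comment.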