@@ -3465,27 +3465,22 @@ void sched_exec(void)
 {
 	struct task_struct *p = current;
 	unsigned long flags;
-	struct rq *rq;
 	int dest_cpu;
 
-	rq = task_rq_lock(p, &flags);
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
 	if (dest_cpu == smp_processor_id())
 		goto unlock;
 
-	/*
-	 * select_task_rq() can race against ->cpus_allowed
-	 */
-	if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
-	    likely(cpu_active(dest_cpu)) && need_migrate_task(p)) {
+	if (likely(cpu_active(dest_cpu))) {
 		struct migration_arg arg = { p, dest_cpu };
 
-		task_rq_unlock(rq, p, &flags);
-		stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
 		return;
 	}
 unlock:
-	task_rq_unlock(rq, p, &flags);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 }
 
 #endif
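For readability, here is a sketch of how sched_exec() reads once this hunk is applied. It is reconstructed directly from the diff above; select_task_rq(), stop_one_cpu(), migration_cpu_stop() and struct migration_arg are assumed to be the surrounding kernel definitions and are not reproduced here.

void sched_exec(void)
{
	struct task_struct *p = current;
	unsigned long flags;
	int dest_cpu;

	/* only p->pi_lock is taken now, instead of task_rq_lock() */
	raw_spin_lock_irqsave(&p->pi_lock, flags);
	dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
	if (dest_cpu == smp_processor_id())
		goto unlock;

	if (likely(cpu_active(dest_cpu))) {
		struct migration_arg arg = { p, dest_cpu };

		/* drop the lock before handing the migration off to the stopper */
		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
		stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
		return;
	}
unlock:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}

The explicit ->cpus_allowed re-check disappears along with the rq lock, presumably because holding p->pi_lock is considered sufficient to serialize select_task_rq() against concurrent affinity changes; the commit message is not shown here, so treat that rationale as an inference from the diff.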