@@ -2736,28 +2736,18 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int cpu __maybe_unused = get_cpu();
 
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
 #ifdef CONFIG_SMP
-	rq = task_rq_lock(p, &flags);
-	p->state = TASK_WAKING;
-
 	/*
 	 * Fork balancing, do it here and not earlier because:
 	 *  - cpus_allowed can change in the fork path
 	 *  - any previously selected cpu might disappear through hotplug
-	 *
-	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
-	 * without people poking at ->cpus_allowed.
 	 */
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
-	set_task_cpu(p, cpu);
-
-	p->state = TASK_RUNNING;
-	task_rq_unlock(rq, p, &flags);
+	set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
 #endif
 
-	rq = task_rq_lock(p, &flags);
+	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
 	p->on_rq = 1;
 	trace_sched_wakeup_new(p, true);
@@ -2767,7 +2757,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, p, &flags);
-	put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS