@@ -2483,6 +2483,48 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 #endif
 }
 
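+/*
+ * Enqueue @p on @rq and mark it runnable; caller must hold rq->lock.
+ */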
+static void
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+{
+#ifdef CONFIG_SMP
+	if (p->sched_contributes_to_load)
+		rq->nr_uninterruptible--;
+#endif
+
+	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
+	ttwu_do_wakeup(rq, p, wake_flags);
+}
+
+/*
+ * Called when the task @p isn't fully descheduled from its runqueue;
+ * in this case we must do a remote wakeup. It's a 'light' wakeup though,
+ * since all we need to do is flip p->state to TASK_RUNNING; the task
+ * is still ->on_rq.
+ */
+static int ttwu_remote(struct task_struct *p, int wake_flags)
+{
+	struct rq *rq;
+	int ret = 0;
+
+	rq = __task_rq_lock(p);
+	if (p->on_rq) {
+		ttwu_do_wakeup(rq, p, wake_flags);
+		ret = 1;
+	}
+	__task_rq_unlock(rq);
+
+	return ret;
+}
+
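+/*
+ * Take @cpu's runqueue lock and do a full activation of @p on it.
+ */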
+static void ttwu_queue(struct task_struct *p, int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	raw_spin_lock(&rq->lock);
+	ttwu_do_activate(rq, p, 0);
+	raw_spin_unlock(&rq->lock);
+}
+
 /**
  * try_to_wake_up - wake up a thread
  * @p: the thread to be awakened
@@ -2501,27 +2543,25 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 static int
 try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 {
-	int cpu, this_cpu, success = 0;
 	unsigned long flags;
-	struct rq *rq;
-
-	this_cpu = get_cpu();
+	int cpu, success = 0;
 
 	smp_wmb();
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	if (!(p->state & state))
 		goto out;
 
+	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
 
-	if (p->on_rq) {
-		rq = __task_rq_lock(p);
-		if (p->on_rq)
-			goto out_running;
-		__task_rq_unlock(rq);
-	}
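+	/*
+	 * If @p is still queued we only need to flip its state to
+	 * TASK_RUNNING; ttwu_remote() does that under the rq->lock.
+	 */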
+	if (p->on_rq && ttwu_remote(p, wake_flags))
+		goto stat;
 
 #ifdef CONFIG_SMP
+	/*
+	 * If the owning (remote) cpu is still in the middle of schedule() with
+	 * this task as prev, wait until it's done referencing the task.
+	 */
 	while (p->on_cpu) {
 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
 		/*
@@ -2530,8 +2570,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 		 * to spin on ->on_cpu if p is current, since that would
 		 * deadlock.
 		 */
-		if (p == current)
-			goto out_activate;
+		if (p == current) {
+			ttwu_queue(p, cpu);
+			goto stat;
+		}
 #endif
 		cpu_relax();
 	}
@@ -2547,32 +2589,15 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 		p->sched_class->task_waking(p);
 
 	cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
-#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
-out_activate:
-#endif
-#endif /* CONFIG_SMP */
-
-	rq = cpu_rq(cpu);
-	raw_spin_lock(&rq->lock);
-
-#ifdef CONFIG_SMP
-	if (cpu != task_cpu(p))
+	if (task_cpu(p) != cpu)
 		set_task_cpu(p, cpu);
+#endif /* CONFIG_SMP */
 
-	if (p->sched_contributes_to_load)
-		rq->nr_uninterruptible--;
-#endif
-
-	ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
-out_running:
-	ttwu_do_wakeup(rq, p, wake_flags);
-	success = 1;
-	__task_rq_unlock(rq);
-
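+	/* @p is fully descheduled; activate it on the cpu chosen above. */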
+	ttwu_queue(p, cpu);
+stat:
 	ttwu_stat(p, cpu, wake_flags);
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
-	put_cpu();
 
 	return success;
 }