|
@@ -1686,6 +1686,39 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
|
|
|
__release(rq2->lock);
|
|
|
}
|
|
|
|
|
|
+#else /* CONFIG_SMP */
|
|
|
+
|
|
|
+/*
|
|
|
+ * double_rq_lock - safely lock two runqueues
|
|
|
+ *
|
|
|
+ * Note this does not disable interrupts like task_rq_lock,
|
|
|
+ * you need to do so manually before calling.
|
|
|
+ */
|
|
|
+static void double_rq_lock(struct rq *rq1, struct rq *rq2)
|
|
|
+ __acquires(rq1->lock)
|
|
|
+ __acquires(rq2->lock)
|
|
|
+{
|
|
|
+ BUG_ON(!irqs_disabled());
|
|
|
+ BUG_ON(rq1 != rq2);
|
|
|
+ raw_spin_lock(&rq1->lock);
|
|
|
+ __acquire(rq2->lock); /* Fake it out ;) */
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * double_rq_unlock - safely unlock two runqueues
|
|
|
+ *
|
|
|
+ * Note this does not restore interrupts like task_rq_unlock,
|
|
|
+ * you need to do so manually after calling.
|
|
|
+ */
|
|
|
+static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
|
|
|
+ __releases(rq1->lock)
|
|
|
+ __releases(rq2->lock)
|
|
|
+{
|
|
|
+ BUG_ON(rq1 != rq2);
|
|
|
+ raw_spin_unlock(&rq1->lock);
|
|
|
+ __release(rq2->lock);
|
|
|
+}
|
|
|
+
|
|
|
#endif
|
|
|
|
|
|
static void calc_load_account_idle(struct rq *this_rq);
|
|
@@ -5448,6 +5481,58 @@ void __sched yield(void)
|
|
|
}
|
|
|
EXPORT_SYMBOL(yield);
|
|
|
|
|
|
+/**
|
|
|
+ * yield_to - yield the current processor to another thread in
|
|
|
+ * your thread group, or accelerate that thread toward the
|
|
|
+ * processor it's on.
|
|
|
+ *
|
|
|
+ * It's the caller's job to ensure that the target task struct
|
|
|
+ * can't go away on us before we can do any checks.
|
|
|
+ *
|
|
|
+ * Returns true if we indeed boosted the target task.
|
|
|
+ */
|
|
|
+bool __sched yield_to(struct task_struct *p, bool preempt)
|
|
|
+{
|
|
|
+ struct task_struct *curr = current;
|
|
|
+ struct rq *rq, *p_rq;
|
|
|
+ unsigned long flags;
|
|
|
+ bool yielded = false;
|
|
|
+
|
|
|
+ local_irq_save(flags);
|
|
|
+ rq = this_rq();
|
|
|
+
|
|
|
+again:
|
|
|
+ p_rq = task_rq(p);
|
|
|
+ double_rq_lock(rq, p_rq);
|
|
|
+ if (task_rq(p) != p_rq) {
|
|
|
+ double_rq_unlock(rq, p_rq);
|
|
|
+ goto again;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (!curr->sched_class->yield_to_task)
|
|
|
+ goto out;
|
|
|
+
|
|
|
+ if (curr->sched_class != p->sched_class)
|
|
|
+ goto out;
|
|
|
+
|
|
|
+ if (task_running(p_rq, p) || p->state)
|
|
|
+ goto out;
|
|
|
+
|
|
|
+ yielded = curr->sched_class->yield_to_task(rq, p, preempt);
|
|
|
+ if (yielded)
|
|
|
+ schedstat_inc(rq, yld_count);
|
|
|
+
|
|
|
+out:
|
|
|
+ double_rq_unlock(rq, p_rq);
|
|
|
+ local_irq_restore(flags);
|
|
|
+
|
|
|
+ if (yielded)
|
|
|
+ schedule();
|
|
|
+
|
|
|
+ return yielded;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(yield_to);
|
|
|
+
|
|
|
/*
|
|
|
* This task is about to go to sleep on IO. Increment rq->nr_iowait so
|
|
|
* that process accounting knows that this is a task in IO wait state.
|