@@ -2002,6 +2002,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 	return 1;
 }
 
+/*
+ * wait_task_context_switch -	wait for a thread to complete at least one
+ *				context switch.
+ *
+ * @p must not be current.
+ */
+void wait_task_context_switch(struct task_struct *p)
+{
+	unsigned long nvcsw, nivcsw, flags;
+	int running;
+	struct rq *rq;
+
+	nvcsw	= p->nvcsw;
+	nivcsw	= p->nivcsw;
+	for (;;) {
+		/*
+		 * The runqueue is assigned before the actual context
+		 * switch. We need to take the runqueue lock.
+		 *
+		 * We could check initially without the lock but it is
+		 * very likely that we need to take the lock in every
+		 * iteration.
+		 */
+		rq = task_rq_lock(p, &flags);
+		running = task_running(rq, p);
+		task_rq_unlock(rq, &flags);
+
+		if (likely(!running))
+			break;
+		/*
+		 * The switch count is incremented before the actual
+		 * context switch. We thus wait for two switches to be
+		 * sure at least one completed.
+		 */
+		if ((p->nvcsw - nvcsw) > 1)
+			break;
+		if ((p->nivcsw - nivcsw) > 1)
+			break;
+
+		cpu_relax();
+	}
+}
+
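For context, a minimal sketch of how a caller might use this helper. The caller and the example_* helpers below are hypothetical and not part of this patch; the point is the ordering: unpublish the per-task state first, then call wait_task_context_switch() so the target cannot still be executing with the old state, and only then free it.

	/* Hypothetical caller, not from this patch. @child must not
	 * be current and may still be running on another CPU.
	 */
	static void example_teardown(struct task_struct *child)
	{
		/* Unpublish the state so a freshly scheduled @child
		 * can no longer pick it up.
		 */
		example_unpublish_state(child);	/* hypothetical */

		/* Wait until @child has completed at least one context
		 * switch; after that it cannot still be executing with
		 * the state unpublished above.
		 */
		wait_task_context_switch(child);

		/* Now safe to free the state. */
		example_free_state(child);	/* hypothetical */
	}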
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *