@@ -1867,16 +1867,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change. If it changes, i.e. @p might have woken up,
+ * then return zero. When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count). If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
  * The caller must ensure that the task *will* unschedule sometime soon,
  * else this function might spin for a *long* time. This function can't
  * be called with interrupts off, or it may introduce deadlock with
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(struct task_struct *p)
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
 	int running, on_rq;
+	unsigned long ncsw;
 	struct rq *rq;
 
 	for (;;) {
@@ -1899,8 +1907,11 @@ void wait_task_inactive(struct task_struct *p)
 		 * return false if the runqueue has changed and p
 		 * is actually now running somewhere else!
 		 */
-		while (task_running(rq, p))
+		while (task_running(rq, p)) {
+			if (match_state && unlikely(p->state != match_state))
+				return 0;
 			cpu_relax();
+		}
 
 		/*
 		 * Ok, time to look more closely! We need the rq
@@ -1910,8 +1921,20 @@ void wait_task_inactive(struct task_struct *p)
 		rq = task_rq_lock(p, &flags);
 		running = task_running(rq, p);
 		on_rq = p->se.on_rq;
+		ncsw = 0;
+		if (!match_state || p->state == match_state) {
+			ncsw = p->nivcsw + p->nvcsw;
+			if (unlikely(!ncsw))
+				ncsw = 1;
+		}
 		task_rq_unlock(rq, &flags);
 
+		/*
+		 * If it changed from the expected state, bail out now.
+		 */
+		if (unlikely(!ncsw))
+			break;
+
 		/*
 		 * Was it really running after all now that we
 		 * checked with the proper locks actually held?
@@ -1944,6 +1967,8 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		break;
 	}
+
+	return ncsw;
 }
 
 /***
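
The two-call protocol described in the new comment block can be seen from the caller's side. Below is a minimal sketch, not part of this patch: the helper name task_stayed_inactive() is hypothetical, and it assumes the caller has already put @p into the expected state (e.g. TASK_TRACED) by other means.

/*
 * Hypothetical illustration only (not from this patch): check that @p
 * stayed off its CPU across an interval by calling wait_task_inactive()
 * twice and comparing the returned context-switch counts.
 */
static int task_stayed_inactive(struct task_struct *p, long state)
{
	unsigned long ncsw, ncsw_again;

	ncsw = wait_task_inactive(p, state);
	if (!ncsw)
		return 0;	/* @p changed state, perhaps woke up */

	/* ... work that relies on @p remaining unscheduled ... */

	ncsw_again = wait_task_inactive(p, state);

	/* Equal nonzero counts mean @p never ran in between. */
	return ncsw_again == ncsw;
}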
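
Note the small design choice behind the sentinel in the patch: when p->nivcsw + p->nvcsw happens to sum to zero, ncsw is forced to 1, so the success path always returns a nonzero value and can never be confused with the 0 that signals a state mismatch.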