@@ -1349,8 +1349,16 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 	struct worker *worker = container_of(work, struct worker, rebind_work);
 	struct global_cwq *gcwq = worker->pool->gcwq;
 
-	if (worker_maybe_bind_and_lock(worker))
-		worker_clr_flags(worker, WORKER_REBIND);
+	worker_maybe_bind_and_lock(worker);
+
+	/*
+	 * %WORKER_REBIND must be cleared even if the above binding failed;
+	 * otherwise, we may confuse the next CPU_UP cycle or oops / get
+	 * stuck by calling idle_worker_rebind() prematurely. If CPU went
+	 * down again inbetween, %WORKER_UNBOUND would be set, so clearing
+	 * %WORKER_REBIND is always safe.
+	 */
+	worker_clr_flags(worker, WORKER_REBIND);
 
 	spin_unlock_irq(&gcwq->lock);
 }
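
For illustration only: a minimal, self-contained userspace sketch of the invariant the new comment describes. The bit values, the toy_* names, and the cpu_online parameter are all hypothetical stand-ins, not the kernel's; the point is that even when binding fails (CPU went down again in between), the hotplug path has set WORKER_UNBOUND, so clearing WORKER_REBIND unconditionally never leaves the worker in a stale rebinding state.

#include <stdbool.h>
#include <stdio.h>

#define WORKER_UNBOUND	(1 << 0)	/* illustrative bit values, not the kernel's */
#define WORKER_REBIND	(1 << 1)

struct toy_worker {
	unsigned int flags;
};

/* Stand-in for worker_maybe_bind_and_lock(): fails if the CPU is offline. */
static bool toy_maybe_bind(struct toy_worker *w, bool cpu_online)
{
	if (cpu_online) {
		w->flags &= ~WORKER_UNBOUND;
		return true;
	}
	/* models the hotplug path having (re)set WORKER_UNBOUND */
	w->flags |= WORKER_UNBOUND;
	return false;
}

/* Mirrors the patched busy_worker_rebind_fn(): clear REBIND either way. */
static void toy_rebind_fn(struct toy_worker *w, bool cpu_online)
{
	toy_maybe_bind(w, cpu_online);
	w->flags &= ~WORKER_REBIND;	/* cleared regardless of binding result */
}

int main(void)
{
	struct toy_worker w = { .flags = WORKER_UNBOUND | WORKER_REBIND };

	toy_rebind_fn(&w, false);	/* CPU went down again inbetween */
	printf("offline: UNBOUND=%d REBIND=%d\n",
	       !!(w.flags & WORKER_UNBOUND), !!(w.flags & WORKER_REBIND));

	w.flags = WORKER_UNBOUND | WORKER_REBIND;
	toy_rebind_fn(&w, true);	/* normal successful rebind */
	printf("online:  UNBOUND=%d REBIND=%d\n",
	       !!(w.flags & WORKER_UNBOUND), !!(w.flags & WORKER_REBIND));
	return 0;
}

In both runs REBIND ends up clear; UNBOUND ends up set only in the offline case, which is exactly the state the next CPU_UP cycle expects, so the unconditional clear in the patch is safe.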