@@ -752,18 +752,17 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	if (cwq->thread == NULL)
 		return;
 
+	flush_cpu_workqueue(cwq);
 	/*
-	 * If the caller is CPU_DEAD the single flush_cpu_workqueue()
-	 * is not enough, a concurrent flush_workqueue() can insert a
-	 * barrier after us.
+	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
+	 * a concurrent flush_workqueue() can insert a barrier after us.
+	 * However, in that case run_workqueue() won't return and check
+	 * kthread_should_stop() until it flushes all work_struct's.
 	 * When ->worklist becomes empty it is safe to exit because no
 	 * more work_structs can be queued on this cwq: flush_workqueue
 	 * checks list_empty(), and a "normal" queue_work() can't use
 	 * a dead CPU.
 	 */
-	while (flush_cpu_workqueue(cwq))
-		;
-
 	kthread_stop(cwq->thread);
 	cwq->thread = NULL;
 }