@@ -2817,6 +2817,19 @@ already_gone:
 	return false;
 }
 
+static bool __flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2830,18 +2843,10 @@ already_gone:
  */
 bool flush_work(struct work_struct *work)
 {
-	struct wq_barrier barr;
-
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
+	return __flush_work(work);
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
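For context (not part of the patch): flush_work() keeps the lock_map_acquire()/lock_map_release() pair so that lockdep models "flushing this work" as acquiring the work's lockdep_map, which catches genuine flush deadlocks. A minimal sketch, assuming only the standard workqueue API, of the bug class that annotation exists to report:

#include <linux/workqueue.h>

/* Illustrative only -- self_flush_fn is a made-up name. */
static void self_flush_fn(struct work_struct *work)
{
	/*
	 * Flushing the currently-running work from inside its own
	 * callback can never complete: the barrier queued by the flush
	 * only runs after this function returns.  With the lockdep
	 * annotation in flush_work(), this shows up as a recursive
	 * acquisition of work->lockdep_map instead of a silent hang.
	 */
	flush_work(work);
}

__flush_work() deliberately omits that pair, which is why it must only be used on paths where the caller can prove no such deadlock is possible.
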
@@ -3411,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 {
 	to->nice = from->nice;
 	cpumask_copy(to->cpumask, from->cpumask);
+	/*
+	 * Unlike hash and equality test, this function doesn't ignore
+	 * ->no_numa as it is used for both pool and wq attrs.  Instead,
+	 * get_unbound_pool() explicitly clears ->no_numa after copying.
+	 */
+	to->no_numa = from->no_numa;
 }
 
 /* hash value of the content of @attr */
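For reference, a sketch (not from the patch) of how ->no_numa reaches copy_workqueue_attrs() in the first place: it is a workqueue-level attribute handed in through apply_workqueue_attrs(), so the wq side needs the field copied while unbound pools must ignore it. The helper name below is hypothetical, and the direct assignment to ->no_numa is for illustration only (normally the flag is toggled through the wq's sysfs numa knob):

#include <linux/workqueue.h>

/* Hypothetical helper, illustration only. */
static int apply_non_numa_attrs(struct workqueue_struct *unbound_wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = 0;
	cpumask_copy(attrs->cpumask, cpu_possible_mask);
	attrs->no_numa = true;	/* meaningful for the wq, never for pools */

	/* copy_workqueue_attrs() must preserve ->no_numa on this path */
	ret = apply_workqueue_attrs(unbound_wq, attrs);

	free_workqueue_attrs(attrs);
	return ret;
}
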
@@ -3578,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	lockdep_set_subclass(&pool->lock, 1);	/* see put_pwq() */
 	copy_workqueue_attrs(pool->attrs, attrs);
 
+	/*
+	 * no_numa isn't a worker_pool attribute, always clear it.  See
+	 * 'struct workqueue_attrs' comments for detail.
+	 */
+	pool->attrs->no_numa = false;
+
 	/* if cpumask is contained inside a NUMA node, we belong to that node */
 	if (wq_numa_enabled) {
 		for_each_node(node) {
@@ -4756,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 
 	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
 	schedule_work_on(cpu, &wfc.work);
-	flush_work(&wfc.work);
+
+	/*
+	 * The work item is on-stack and can't lead to deadlock through
+	 * flushing.  Use __flush_work() to avoid spurious lockdep warnings
+	 * when work_on_cpu()s are nested.
+	 */
+	__flush_work(&wfc.work);
+
 	return wfc.ret;
 }
 EXPORT_SYMBOL_GPL(work_on_cpu);
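To illustrate the nesting the comment above refers to (a sketch, not part of the patch): every work_on_cpu() instance shares the single lockdep class that INIT_WORK_ONSTACK() assigns at that one call site, so a nested call used to look to lockdep like recursively acquiring the same lockdep_map even though the on-stack work item cannot actually deadlock. The function names below are made up for the example:

#include <linux/workqueue.h>

static long inner_fn(void *arg)
{
	return 0;
}

static long outer_fn(void *arg)
{
	/*
	 * Second level: runs inside the first work item.  Before this
	 * change, the flush in the nested work_on_cpu() re-acquired the
	 * same lockdep class held by the outer flush and triggered a
	 * false-positive recursive-locking report.
	 */
	return work_on_cpu(0, inner_fn, NULL);
}

static long nested_work_on_cpu_example(void)
{
	return work_on_cpu(1, outer_fn, NULL);
}

With flush_work() split into __flush_work(), only the exported flush_work() carries the lockdep annotation; work_on_cpu() calls __flush_work() directly and the warning no longer fires.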