@@ -2326,35 +2326,17 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-/**
- * flush_work - wait for a work to finish executing the last queueing instance
- * @work: the work to flush
- *
- * Wait until @work has finished execution. This function considers
- * only the last queueing instance of @work. If @work has been
- * enqueued across different CPUs on a non-reentrant workqueue or on
- * multiple workqueues, @work might still be executing on return on
- * some of the CPUs from earlier queueing.
- *
- * If @work was queued only on a non-reentrant, ordered or unbound
- * workqueue, @work is guaranteed to be idle on return if it hasn't
- * been requeued since flush started.
- *
- * RETURNS:
- * %true if flush_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_work(struct work_struct *work)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool wait_executing)
 {
 	struct worker *worker = NULL;
 	struct global_cwq *gcwq;
 	struct cpu_workqueue_struct *cwq;
-	struct wq_barrier barr;
 
 	might_sleep();
 	gcwq = get_work_gcwq(work);
 	if (!gcwq)
-		return 0;
+		return false;
 
 	spin_lock_irq(&gcwq->lock);
 	if (!list_empty(&work->entry)) {
@@ -2367,26 +2349,54 @@ bool flush_work(struct work_struct *work)
 		cwq = get_work_cwq(work);
 		if (unlikely(!cwq || gcwq != cwq->gcwq))
 			goto already_gone;
-	} else {
+	} else if (wait_executing) {
 		worker = find_worker_executing_work(gcwq, work);
 		if (!worker)
 			goto already_gone;
 		cwq = worker->current_cwq;
-	}
+	} else
+		goto already_gone;
 
-	insert_wq_barrier(cwq, &barr, work, worker);
+	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
-
-	wait_for_completion(&barr.done);
-	destroy_work_on_stack(&barr.work);
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
 	return false;
 }
+
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution. This function considers
+ * only the last queueing instance of @work. If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+	struct wq_barrier barr;
+
+	if (start_flush_work(work, &barr, true)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else
+		return false;
+}
 EXPORT_SYMBOL_GPL(flush_work);
 
 static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
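
Illustrative only, not part of the patch: a minimal sketch of a hypothetical
caller relying on flush_work()'s return value, which after this split simply
reports whether start_flush_work() inserted a barrier that had to be waited
for. The names my_dev, my_work_fn, my_dev_init and my_dev_teardown are
invented for this example.

#include <linux/workqueue.h>
#include <linux/printk.h>

struct my_dev {
	struct work_struct refresh_work;
};

static void my_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, refresh_work);

	/* deferred refresh for dev would go here */
	(void)dev;
}

static void my_dev_init(struct my_dev *dev)
{
	INIT_WORK(&dev->refresh_work, my_work_fn);
}

static void my_dev_teardown(struct my_dev *dev)
{
	/*
	 * flush_work() calls start_flush_work(work, &barr, true): it waits
	 * both for a pending instance and for one currently executing on
	 * the same gcwq, and returns %true only when a barrier was actually
	 * inserted and completed.
	 */
	if (flush_work(&dev->refresh_work))
		pr_debug("refresh_work was pending or running; now finished\n");
	else
		pr_debug("refresh_work was already idle\n");
}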