@@ -42,9 +42,6 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/workqueue.h>
-
 #include "workqueue_sched.h"
 
 enum {
@@ -257,6 +254,9 @@ EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #define for_each_busy_worker(worker, i, pos, gcwq)                     \
        for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
                hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
@@ -310,21 +310,6 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
             (cpu) < WORK_CPU_NONE;                                     \
             (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
 
-#ifdef CONFIG_LOCKDEP
-/**
- * in_workqueue_context() - in context of specified workqueue?
- * @wq: the workqueue of interest
- *
- * Checks lockdep state to see if the current task is executing from
- * within a workqueue item. This function exists only if lockdep is
- * enabled.
- */
-int in_workqueue_context(struct workqueue_struct *wq)
-{
-       return lock_is_held(&wq->lockdep_map);
-}
-#endif
-
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
 static struct debug_obj_descr work_debug_descr;
@@ -604,7 +589,9 @@ static bool keep_working(struct global_cwq *gcwq)
 {
        atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 
-       return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
+       return !list_empty(&gcwq->worklist) &&
+               (atomic_read(nr_running) <= 1 ||
+                gcwq->flags & GCWQ_HIGHPRI_PENDING);
 }
 
 /* Do we need a new worker? Called from manager. */
@@ -997,6 +984,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        /* gcwq determined, get cwq and queue */
        cwq = get_cwq(gcwq->cpu, wq);
+       trace_workqueue_queue_work(cpu, cwq, work);
 
        BUG_ON(!list_empty(&work->entry));
 
@@ -1004,6 +992,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        work_flags = work_color_to_flags(cwq->work_color);
 
        if (likely(cwq->nr_active < cwq->max_active)) {
+               trace_workqueue_activate_work(work);
                cwq->nr_active++;
                worklist = gcwq_determine_ins_pos(gcwq, cwq);
        } else {
@@ -1679,6 +1668,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
                                                    struct work_struct, entry);
        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
+       trace_workqueue_activate_work(work);
        move_linked_works(work, pos, NULL);
        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        cwq->nr_active++;
@@ -2326,27 +2316,17 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
-/**
- * flush_work - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
- *
- * Returns false if @work has already terminated.
- *
- * It is expected that, prior to calling flush_work(), the caller has
- * arranged for the work to not be requeued, otherwise it doesn't make
- * sense to use this function.
- */
-int flush_work(struct work_struct *work)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+                             bool wait_executing)
 {
        struct worker *worker = NULL;
        struct global_cwq *gcwq;
        struct cpu_workqueue_struct *cwq;
-       struct wq_barrier barr;
 
        might_sleep();
        gcwq = get_work_gcwq(work);
        if (!gcwq)
-               return 0;
+               return false;
 
        spin_lock_irq(&gcwq->lock);
        if (!list_empty(&work->entry)) {
@@ -2359,28 +2339,127 @@ int flush_work(struct work_struct *work)
                cwq = get_work_cwq(work);
                if (unlikely(!cwq || gcwq != cwq->gcwq))
                        goto already_gone;
-       } else {
+       } else if (wait_executing) {
                worker = find_worker_executing_work(gcwq, work);
                if (!worker)
                        goto already_gone;
                cwq = worker->current_cwq;
-       }
+       } else
+               goto already_gone;
 
-       insert_wq_barrier(cwq, &barr, work, worker);
+       insert_wq_barrier(cwq, barr, work, worker);
        spin_unlock_irq(&gcwq->lock);
 
        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);
-
-       wait_for_completion(&barr.done);
-       destroy_work_on_stack(&barr.work);
-       return 1;
+       return true;
 already_gone:
        spin_unlock_irq(&gcwq->lock);
-       return 0;
+       return false;
+}
+
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution. This function considers
+ * only the last queueing instance of @work. If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+       struct wq_barrier barr;
+
+       if (start_flush_work(work, &barr, true)) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+               return true;
+       } else
+               return false;
 }
 EXPORT_SYMBOL_GPL(flush_work);
 
+static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
+{
+       struct wq_barrier barr;
+       struct worker *worker;
+
+       spin_lock_irq(&gcwq->lock);
+
+       worker = find_worker_executing_work(gcwq, work);
+       if (unlikely(worker))
+               insert_wq_barrier(worker->current_cwq, &barr, work, worker);
+
+       spin_unlock_irq(&gcwq->lock);
+
+       if (unlikely(worker)) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+               return true;
+       } else
+               return false;
+}
+
+static bool wait_on_work(struct work_struct *work)
+{
+       bool ret = false;
+       int cpu;
+
+       might_sleep();
+
+       lock_map_acquire(&work->lockdep_map);
+       lock_map_release(&work->lockdep_map);
+
+       for_each_gcwq_cpu(cpu)
+               ret |= wait_on_cpu_work(get_gcwq(cpu), work);
+       return ret;
+}
+
+/**
+ * flush_work_sync - wait until a work has finished execution
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution. On return, it's
+ * guaranteed that all queueing instances of @work which happened
+ * before this function is called are finished. In other words, if
+ * @work hasn't been requeued since this function was called, @work is
+ * guaranteed to be idle on return.
+ *
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work_sync(struct work_struct *work)
+{
+       struct wq_barrier barr;
+       bool pending, waited;
+
+       /* we'll wait for executions separately, queue barr only if pending */
+       pending = start_flush_work(work, &barr, false);
+
+       /* wait for executions to finish */
+       waited = wait_on_work(work);
+
+       /* wait for the pending one */
+       if (pending) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+       }
+
+       return pending || waited;
+}
+EXPORT_SYMBOL_GPL(flush_work_sync);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
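[Editor's note, not part of the patch: a minimal caller-side sketch of the difference the hunk above documents between flush_work() and the new flush_work_sync(). It assumes the matching workqueue.h declarations from this series; the example_* names and the module wrapper are hypothetical.]

/* Illustrative sketch only -- not part of the patch. */
#include <linux/module.h>
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
        pr_info("example work ran\n");
}

static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
        schedule_work(&example_work);

        /*
         * flush_work() waits only for the last queueing instance of
         * example_work; instances queued earlier on other CPUs of a
         * non-reentrant workqueue may still be running afterwards.
         */
        if (flush_work(&example_work))
                pr_info("waited for example_work\n");

        /*
         * flush_work_sync() also waits for any instance still executing
         * from earlier queueings, so the work is idle on return unless
         * it has been requeued in the meantime.
         */
        flush_work_sync(&example_work);
        return 0;
}

static void __exit example_exit(void)
{
        cancel_work_sync(&example_work);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");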
@@ -2423,39 +2502,7 @@ static int try_to_grab_pending(struct work_struct *work)
        return ret;
 }
 
-static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
-{
-       struct wq_barrier barr;
-       struct worker *worker;
-
-       spin_lock_irq(&gcwq->lock);
-
-       worker = find_worker_executing_work(gcwq, work);
-       if (unlikely(worker))
-               insert_wq_barrier(worker->current_cwq, &barr, work, worker);
-
-       spin_unlock_irq(&gcwq->lock);
-
-       if (unlikely(worker)) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-       }
-}
-
-static void wait_on_work(struct work_struct *work)
-{
-       int cpu;
-
-       might_sleep();
-
-       lock_map_acquire(&work->lockdep_map);
-       lock_map_release(&work->lockdep_map);
-
-       for_each_gcwq_cpu(cpu)
-               wait_on_cpu_work(get_gcwq(cpu), work);
-}
-
-static int __cancel_work_timer(struct work_struct *work,
+static bool __cancel_work_timer(struct work_struct *work,
                                struct timer_list* timer)
 {
        int ret;
@@ -2472,42 +2519,81 @@ static int __cancel_work_timer(struct work_struct *work,
 }
 
 /**
- * cancel_work_sync - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
- *
- * Returns true if @work was pending.
+ * cancel_work_sync - cancel a work and wait for it to finish
+ * @work: the work to cancel
  *
- * cancel_work_sync() will cancel the work if it is queued. If the work's
- * callback appears to be running, cancel_work_sync() will block until it
- * has completed.
- *
- * It is possible to use this function if the work re-queues itself. It can
- * cancel the work even if it migrates to another workqueue, however in that
- * case it only guarantees that work->func() has completed on the last queued
- * workqueue.
+ * Cancel @work and wait for its execution to finish. This function
+ * can be used even if the work re-queues itself or migrates to
+ * another workqueue. On return from this function, @work is
+ * guaranteed to be not pending or executing on any CPU.
  *
- * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
- * pending, otherwise it goes into a busy-wait loop until the timer expires.
+ * cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed_work's. Use cancel_delayed_work_sync() instead.
  *
- * The caller must ensure that workqueue_struct on which this work was last
+ * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
+ *
+ * RETURNS:
+ * %true if @work was pending, %false otherwise.
  */
-int cancel_work_sync(struct work_struct *work)
+bool cancel_work_sync(struct work_struct *work)
 {
        return __cancel_work_timer(work, NULL);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 /**
- * cancel_delayed_work_sync - reliably kill off a delayed work.
- * @dwork: the delayed work struct
+ * flush_delayed_work - wait for a dwork to finish executing the last queueing
+ * @dwork: the delayed work to flush
+ *
+ * Delayed timer is cancelled and the pending work is queued for
+ * immediate execution. Like flush_work(), this function only
+ * considers the last queueing instance of @dwork.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer))
+               __queue_work(raw_smp_processor_id(),
+                            get_work_cwq(&dwork->work)->wq, &dwork->work);
+       return flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
+ * flush_delayed_work_sync - wait for a dwork to finish
+ * @dwork: the delayed work to flush
  *
- * Returns true if @dwork was pending.
+ * Delayed timer is cancelled and the pending work is queued for
+ * execution immediately. Other than timer handling, its behavior
+ * is identical to flush_work_sync().
  *
- * It is possible to use this function if @dwork rearms itself via queue_work()
- * or queue_delayed_work(). See also the comment for cancel_work_sync().
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
  */
-int cancel_delayed_work_sync(struct delayed_work *dwork)
+bool flush_delayed_work_sync(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer))
+               __queue_work(raw_smp_processor_id(),
+                            get_work_cwq(&dwork->work)->wq, &dwork->work);
+       return flush_work_sync(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work_sync);
+
+/**
+ * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
+ * @dwork: the delayed work to cancel
+ *
+ * This is cancel_work_sync() for delayed works.
+ *
+ * RETURNS:
+ * %true if @dwork was pending, %false otherwise.
+ */
+bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
        return __cancel_work_timer(&dwork->work, &dwork->timer);
 }
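[Editor's note, not part of the patch: a minimal sketch of how a self-rearming delayed work is forced to run with flush_delayed_work() and torn down with cancel_delayed_work_sync(), per the kernel-doc above. The example_* names are hypothetical.]

/* Illustrative sketch only -- not part of the patch. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_poll_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        /* ... poll hardware ... */
        schedule_delayed_work(dwork, msecs_to_jiffies(500));    /* re-arm */
}

static DECLARE_DELAYED_WORK(example_poll, example_poll_fn);

static int __init example_init(void)
{
        schedule_delayed_work(&example_poll, msecs_to_jiffies(500));

        /*
         * flush_delayed_work() cancels the timer, queues the work for
         * immediate execution and waits for that queueing instance.
         */
        flush_delayed_work(&example_poll);
        return 0;
}

static void __exit example_exit(void)
{
        /*
         * Safe even though the handler re-arms itself: on return the
         * work is neither pending nor executing on any CPU.
         */
        cancel_delayed_work_sync(&example_poll);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");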
@@ -2558,23 +2644,6 @@ int schedule_delayed_work(struct delayed_work *dwork,
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
-/**
- * flush_delayed_work - block until a dwork_struct's callback has terminated
- * @dwork: the delayed work which is to be flushed
- *
- * Any timeout is cancelled, and any pending work is run immediately.
- */
-void flush_delayed_work(struct delayed_work *dwork)
-{
-       if (del_timer_sync(&dwork->timer)) {
-               __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
-                            &dwork->work);
-               put_cpu();
-       }
-       flush_work(&dwork->work);
-}
-EXPORT_SYMBOL(flush_delayed_work);
-
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
@@ -2592,13 +2661,15 @@ int schedule_delayed_work_on(int cpu,
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
- * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * schedule_on_each_cpu - execute a function synchronously on each online CPU
  * @func: the function to call
  *
- * Returns zero on success.
- * Returns -ve errno on failure.
- *
+ * schedule_on_each_cpu() executes @func on each online CPU using the
+ * system workqueue and blocks until all CPUs have completed.
  * schedule_on_each_cpu() is very slow.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
 int schedule_on_each_cpu(work_func_t func)
 {
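[Editor's note, not part of the patch: a sketch of an in-kernel schedule_on_each_cpu() caller matching the updated kernel-doc above. The per-CPU counter and example_* names are hypothetical.]

/* Illustrative sketch only -- not part of the patch. */
#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_count_fn(struct work_struct *unused)
{
        /* Runs in process context on the CPU the work was queued for. */
        this_cpu_inc(example_hits);
}

static int example_count_on_all_cpus(void)
{
        /* Very slow: queues one work per online CPU and flushes them all. */
        return schedule_on_each_cpu(example_count_fn);
}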
@@ -2763,6 +2834,13 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
        struct workqueue_struct *wq;
        unsigned int cpu;
 
+       /*
+        * Workqueues which may be used during memory reclaim should
+        * have a rescuer to guarantee forward progress.
+        */
+       if (flags & WQ_MEM_RECLAIM)
+               flags |= WQ_RESCUER;
+
        /*
         * Unbound workqueues aren't concurrency managed and should be
         * dispatched to workers immediately.
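[Editor's note, not part of the patch: the caller-side effect of the hunk above is that a workqueue created with WQ_MEM_RECLAIM gets a rescuer thread and can make forward progress under memory pressure. A minimal sketch; the example_* names are hypothetical.]

/* Illustrative sketch only -- not part of the patch. */
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_io_wq;

static int example_setup(void)
{
        /*
         * The rescuer guarantees that queued work is eventually executed
         * even when no new worker threads can be created during reclaim.
         */
        example_io_wq = alloc_workqueue("example_io", WQ_MEM_RECLAIM, 1);
        if (!example_io_wq)
                return -ENOMEM;
        return 0;
}

static void example_teardown(void)
{
        destroy_workqueue(example_io_wq);
}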