@@ -98,79 +98,6 @@ static inline void *get_wq_data(struct work_struct *work)
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
-static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
-{
-	int ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&cwq->lock, flags);
-	/*
-	 * We need to re-validate the work info after we've gotten
-	 * the cpu_workqueue lock. We can run the work now iff:
-	 *
-	 *  - the wq_data still matches the cpu_workqueue_struct
-	 *  - AND the work is still marked pending
-	 *  - AND the work is still on a list (which will be this
-	 *    workqueue_struct list)
-	 *
-	 * All these conditions are important, because we
-	 * need to protect against the work being run right
-	 * now on another CPU (all but the last one might be
-	 * true if it's currently running and has not been
-	 * released yet, for example).
-	 */
-	if (get_wq_data(work) == cwq
-			&& work_pending(work)
-			&& !list_empty(&work->entry)) {
-		work_func_t f = work->func;
-		cwq->current_work = work;
-		list_del_init(&work->entry);
-		spin_unlock_irqrestore(&cwq->lock, flags);
-
-		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
-			work_release(work);
-		f(work);
-
-		spin_lock_irqsave(&cwq->lock, flags);
-		cwq->current_work = NULL;
-		ret = 1;
-	}
-	spin_unlock_irqrestore(&cwq->lock, flags);
-	return ret;
-}
-
-/**
- * run_scheduled_work - run scheduled work synchronously
- * @work: work to run
- *
- * This checks if the work was pending, and runs it
- * synchronously if so. It returns a boolean to indicate
- * whether it had any scheduled work to run or not.
- *
- * NOTE! This _only_ works for normal work_structs. You
- * CANNOT use this for delayed work, because the wq data
- * for delayed work will not point properly to the per-
- * CPU workqueue struct, but will change!
- */
-int fastcall run_scheduled_work(struct work_struct *work)
-{
-	for (;;) {
-		struct cpu_workqueue_struct *cwq;
-
-		if (!work_pending(work))
-			return 0;
-		if (list_empty(&work->entry))
-			return 0;
-		/* NOTE! This depends intimately on __queue_work! */
-		cwq = get_wq_data(work);
-		if (!cwq)
-			return 0;
-		if (__run_work(cwq, work))
-			return 1;
-	}
-}
-EXPORT_SYMBOL(run_scheduled_work);
-
 static void insert_work(struct cpu_workqueue_struct *cwq,
 			struct work_struct *work, int tail)
 {
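
For reference, a minimal caller-side sketch of the API this patch removes. This is a hypothetical illustration, not code from the patch: it assumes the pre-removal workqueue API in which run_scheduled_work() was still exported, and the names my_handler, my_work and kick_and_flush are invented for the example.

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* my_handler/my_work are hypothetical, for illustration only */
static void my_handler(struct work_struct *work)
{
	/* deferred processing would go here */
}

static DECLARE_WORK(my_work, my_handler);

static void kick_and_flush(void)
{
	schedule_work(&my_work);

	/*
	 * Per the kerneldoc in the removed code above:
	 * run_scheduled_work() returns 1 if the item was still
	 * pending and was run synchronously here, 0 if there was
	 * nothing left to run. It is only valid for plain
	 * work_structs, never for delayed work.
	 */
	if (run_scheduled_work(&my_work))
		pr_debug("my_work ran synchronously\n");
}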