@@ -451,7 +451,6 @@ static DEFINE_MUTEX(worker_pool_idr_mutex);
 static DEFINE_IDR(worker_pool_idr);
 
 static int worker_thread(void *__worker);
-static unsigned int work_cpu(struct work_struct *work);
 
 static int std_worker_pool_pri(struct worker_pool *pool)
 {
@@ -479,6 +478,15 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	return ret;
 }
 
+/*
+ * Lookup worker_pool by id. The idr currently is built during boot and
+ * never modified. Don't worry about locking for now.
+ */
+static struct worker_pool *worker_pool_by_id(int pool_id)
+{
+	return idr_find(&worker_pool_idr, pool_id);
+}
+
 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
 {
 	int cpu = pool->gcwq->cpu;
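
The lookup above depends on pool IDs handed out by worker_pool_assign_id(), the function named in the hunk header. For reference, the allocation side (introduced by the preceding patch in this series) presumably looks something like the sketch below, using the idr_pre_get()/idr_get_new() API of this kernel generation and omitting the -EAGAIN retry a robust caller would keep:

static int worker_pool_assign_id(struct worker_pool *pool)
{
	int ret;

	mutex_lock(&worker_pool_idr_mutex);
	/* preload idr nodes, then store @pool and receive its new ID */
	idr_pre_get(&worker_pool_idr, GFP_KERNEL);
	ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
	mutex_unlock(&worker_pool_idr_mutex);

	return ret;
}

Because every pool is registered this way during early boot, before any work item can record a pool ID off-queue, idr_find() in worker_pool_by_id() never races with an insertion; that is what lets the comment defer locking.
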
@@ -520,17 +528,17 @@ static int work_next_color(int color)
 /*
  * While queued, %WORK_STRUCT_CWQ is set and non flag bits of a work's data
  * contain the pointer to the queued cwq. Once execution starts, the flag
- * is cleared and the high bits contain OFFQ flags and CPU number.
+ * is cleared and the high bits contain OFFQ flags and pool ID.
  *
- * set_work_cwq(), set_work_cpu_and_clear_pending(), mark_work_canceling()
- * and clear_work_data() can be used to set the cwq, cpu or clear
+ * set_work_cwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
+ * and clear_work_data() can be used to set the cwq, pool or clear
  * work->data. These functions should only be called while the work is
  * owned - ie. while the PENDING bit is set.
  *
- * get_work_[g]cwq() can be used to obtain the gcwq or cwq corresponding to
- * a work. gcwq is available once the work has been queued anywhere after
- * initialization until it is sync canceled. cwq is available only while
- * the work item is queued.
+ * get_work_pool() and get_work_cwq() can be used to obtain the pool or cwq
+ * corresponding to a work. Pool is available once the work has been
+ * queued anywhere after initialization until it is sync canceled. cwq is
+ * available only while the work item is queued.
  *
  * %WORK_OFFQ_CANCELING is used to mark a work item which is being
  * canceled. While being canceled, a work item may have its PENDING set
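
To make the encoding described above concrete, here is a stand-alone user-space model of the off-queue word. The DEMO_* bit positions are invented for illustration and only stand in for the real WORK_STRUCT_*/WORK_OFFQ_* constants in include/linux/workqueue.h:

#include <assert.h>
#include <stdio.h>

#define DEMO_PENDING		(1UL << 0)	/* stands in for WORK_STRUCT_PENDING */
#define DEMO_OFFQ_CANCELING	(1UL << 4)	/* stands in for WORK_OFFQ_CANCELING */
#define DEMO_OFFQ_POOL_SHIFT	5		/* stands in for WORK_OFFQ_POOL_SHIFT */

/* pool ID goes in the high bits, flags stay below the shift */
static unsigned long pack_pool(int pool_id, unsigned long flags)
{
	return ((unsigned long)pool_id << DEMO_OFFQ_POOL_SHIFT) | flags;
}

static int unpack_pool(unsigned long data)
{
	return data >> DEMO_OFFQ_POOL_SHIFT;
}

int main(void)
{
	/* like set_work_pool_and_clear_pending(): pool ID only, no flags */
	unsigned long data = pack_pool(3, 0);

	assert(unpack_pool(data) == 3);
	assert(!(data & DEMO_PENDING));

	/* like mark_work_canceling(): keep the pool ID, add CANCELING + PENDING */
	data = pack_pool(3, DEMO_OFFQ_CANCELING | DEMO_PENDING);
	assert(unpack_pool(data) == 3);
	assert(data & DEMO_OFFQ_CANCELING);

	printf("off-queue data model OK\n");
	return 0;
}

The property the patch relies on shows up in unpack_pool(): since the pool ID sits strictly above every flag bit, a plain right shift recovers it no matter which OFFQ flags happen to be set.
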
@@ -552,8 +560,8 @@ static void set_work_cwq(struct work_struct *work,
 		      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 }
 
-static void set_work_cpu_and_clear_pending(struct work_struct *work,
-					   unsigned int cpu)
+static void set_work_pool_and_clear_pending(struct work_struct *work,
+					    int pool_id)
 {
 	/*
 	 * The following wmb is paired with the implied mb in
@@ -562,13 +570,13 @@ static void set_work_cpu_and_clear_pending(struct work_struct *work,
 	 * owner.
 	 */
 	smp_wmb();
-	set_work_data(work, (unsigned long)cpu << WORK_OFFQ_CPU_SHIFT, 0);
+	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 }
 
 static void clear_work_data(struct work_struct *work)
 {
-	smp_wmb();	/* see set_work_cpu_and_clear_pending() */
-	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
+	smp_wmb();	/* see set_work_pool_and_clear_pending() */
+	set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 }
 
 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
@@ -581,30 +589,58 @@ static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
 		return NULL;
 }
 
-static struct global_cwq *get_work_gcwq(struct work_struct *work)
+/**
+ * get_work_pool - return the worker_pool a given work was associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool @work was last associated with. %NULL if none.
+ */
+static struct worker_pool *get_work_pool(struct work_struct *work)
 {
 	unsigned long data = atomic_long_read(&work->data);
-	unsigned int cpu;
+	struct worker_pool *pool;
+	int pool_id;
 
 	if (data & WORK_STRUCT_CWQ)
 		return ((struct cpu_workqueue_struct *)
-			(data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
+			(data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 
-	cpu = data >> WORK_OFFQ_CPU_SHIFT;
-	if (cpu == WORK_OFFQ_CPU_NONE)
+	pool_id = data >> WORK_OFFQ_POOL_SHIFT;
+	if (pool_id == WORK_OFFQ_POOL_NONE)
 		return NULL;
 
-	BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
-	return get_gcwq(cpu);
+	pool = worker_pool_by_id(pool_id);
+	WARN_ON_ONCE(!pool);
+	return pool;
+}
+
+/**
+ * get_work_pool_id - return the worker pool ID a given work is associated with
+ * @work: the work item of interest
+ *
+ * Return the worker_pool ID @work was last associated with.
+ * %WORK_OFFQ_POOL_NONE if none.
+ */
+static int get_work_pool_id(struct work_struct *work)
+{
+	struct worker_pool *pool = get_work_pool(work);
+
+	return pool ? pool->id : WORK_OFFQ_POOL_NONE;
+}
+
+static struct global_cwq *get_work_gcwq(struct work_struct *work)
+{
+	struct worker_pool *pool = get_work_pool(work);
+
+	return pool ? pool->gcwq : NULL;
 }
 
 static void mark_work_canceling(struct work_struct *work)
 {
-	struct global_cwq *gcwq = get_work_gcwq(work);
-	unsigned long cpu = gcwq ? gcwq->cpu : WORK_OFFQ_CPU_NONE;
+	unsigned long pool_id = get_work_pool_id(work);
 
-	set_work_data(work, (cpu << WORK_OFFQ_CPU_SHIFT) | WORK_OFFQ_CANCELING,
-		      WORK_STRUCT_PENDING);
+	pool_id <<= WORK_OFFQ_POOL_SHIFT;
+	set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 }
 
 static bool work_is_canceling(struct work_struct *work)
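
A short usage sketch of the two new getters; the caller below is hypothetical, only the helpers come from this patch. Once a work item has finished executing, WORK_STRUCT_CWQ is clear and get_work_cwq() returns NULL, but the pool it last ran on can still be looked up:

static void example_report_last_pool(struct work_struct *work)
{
	struct worker_pool *pool = get_work_pool(work);

	if (!pool) {
		/* never queued since init, or sync canceled */
		pr_info("work %p has no associated pool\n", work);
		return;
	}

	/* get_work_pool_id() is the convenience form when only the ID matters */
	pr_info("work %p last belonged to pool %d\n", work, get_work_pool_id(work));
}
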
@@ -2192,12 +2228,12 @@ __acquires(&gcwq->lock)
 		wake_up_worker(pool);
 
 	/*
-	 * Record the last CPU and clear PENDING which should be the last
+	 * Record the last pool and clear PENDING which should be the last
 	 * update to @work. Also, do this inside @gcwq->lock so that
 	 * PENDING and queued state changes happen together while IRQ is
 	 * disabled.
 	 */
-	set_work_cpu_and_clear_pending(work, gcwq->cpu);
+	set_work_pool_and_clear_pending(work, pool->id);
 
 	spin_unlock_irq(&gcwq->lock);
 
@@ -2967,7 +3003,8 @@ bool cancel_delayed_work(struct delayed_work *dwork)
 	if (unlikely(ret < 0))
 		return false;
 
-	set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
+	set_work_pool_and_clear_pending(&dwork->work,
+					get_work_pool_id(&dwork->work));
 	local_irq_restore(flags);
 	return ret;
 }
@@ -3430,20 +3467,6 @@ bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(workqueue_congested);
 
-/**
- * work_cpu - return the last known associated cpu for @work
- * @work: the work of interest
- *
- * RETURNS:
- * CPU number if @work was ever queued. WORK_CPU_NONE otherwise.
- */
-static unsigned int work_cpu(struct work_struct *work)
-{
-	struct global_cwq *gcwq = get_work_gcwq(work);
-
-	return gcwq ? gcwq->cpu : WORK_CPU_NONE;
-}
-
 /**
  * work_busy - test whether a work is currently pending or running
  * @work: the work to be tested
@@ -3816,9 +3839,9 @@ static int __init init_workqueues(void)
 {
 	unsigned int cpu;
 
-	/* make sure we have enough bits for OFFQ CPU number */
-	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_CPU_SHIFT)) <
-		     WORK_CPU_LAST);
+	/* make sure we have enough bits for OFFQ pool ID */
+	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
+		     WORK_CPU_LAST * NR_STD_WORKER_POOLS);
 
 	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
 	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
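
Rough numbers behind the tightened check, with assumed values since the real constants live in include/linux/workqueue.h: WORK_OFFQ_POOL_SHIFT is taken as 9 purely for illustration, WORK_CPU_LAST is treated as the possible CPUs plus the unbound pseudo-CPU, and NR_STD_WORKER_POOLS as the two standard (normal and highpri) pools:

#include <stdio.h>

int main(void)
{
	const int bits_per_long = 32;		/* worst case: 32-bit build */
	const int offq_pool_shift = 9;		/* assumed stand-in for WORK_OFFQ_POOL_SHIFT */
	const unsigned long ids_available = 1UL << (bits_per_long - offq_pool_shift);
	/* one normal + one highpri standard pool per possible CPU plus the unbound pseudo-CPU */
	const unsigned long ids_needed = (4096 + 1) * 2;

	printf("pool IDs available=%lu needed<=%lu ok=%d\n",
	       ids_available, ids_needed, ids_available >= ids_needed);
	return 0;
}

Under these assumed values the 23 high bits can name over eight million pools while at most a few thousand IDs are ever allocated, which is why a compile-time BUILD_BUG_ON rather than a runtime check is enough.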