|
@@ -521,14 +521,21 @@ static inline void debug_work_activate(struct work_struct *work) { }
|
|
|
static inline void debug_work_deactivate(struct work_struct *work) { }
|
|
|
#endif
|
|
|
|
|
|
-/* allocate ID and assign it to @pool */
|
|
|
+/**
|
|
|
+ * worker_pool_assign_id - allocate ID and assign it to @pool
|
|
|
+ * @pool: the pool pointer of interest
|
|
|
+ *
|
|
|
+ * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
|
|
|
+ * successfully, -errno on failure.
|
|
|
+ */
|
|
|
static int worker_pool_assign_id(struct worker_pool *pool)
|
|
|
{
|
|
|
int ret;
|
|
|
|
|
|
lockdep_assert_held(&wq_pool_mutex);
|
|
|
|
|
|
- ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
|
|
|
+ ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
|
|
|
+ GFP_KERNEL);
|
|
|
if (ret >= 0) {
|
|
|
pool->id = ret;
|
|
|
return 0;
|
|
@@ -5020,10 +5027,6 @@ static int __init init_workqueues(void)
|
|
|
int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
|
|
|
int i, cpu;
|
|
|
|
|
|
- /* make sure we have enough bits for OFFQ pool ID */
|
|
|
- BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
|
|
|
- WORK_CPU_END * NR_STD_WORKER_POOLS);
|
|
|
-
|
|
|
WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
|
|
|
|
|
|
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
|