@@ -226,7 +226,7 @@ struct wq_device;
  * the appropriate worker_pool through its pool_workqueues.
  */
 struct workqueue_struct {
-	unsigned int		flags;		/* PL: WQ_* flags */
+	unsigned int		flags;		/* WQ: WQ_* flags */
 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwq's */
 	struct list_head	pwqs;		/* WR: all pwqs of this wq */
 	struct list_head	list;		/* PL: list of all workqueues */
@@ -242,7 +242,7 @@ struct workqueue_struct {
 	struct list_head	maydays;	/* MD: pwqs requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
 
-	int			nr_drainers;	/* PL: drain in progress */
+	int			nr_drainers;	/* WQ: drain in progress */
 	int			saved_max_active; /* PW: saved pwq max_active */
 
 #ifdef CONFIG_SYSFS
@@ -2684,10 +2684,10 @@ void drain_workqueue(struct workqueue_struct *wq)
 	 * hotter than drain_workqueue() and already looks at @wq->flags.
 	 * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
 	 */
-	mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq->mutex);
 	if (!wq->nr_drainers++)
 		wq->flags |= __WQ_DRAINING;
-	mutex_unlock(&wq_pool_mutex);
+	mutex_unlock(&wq->mutex);
 reflush:
 	flush_workqueue(wq);
 
@@ -2714,10 +2714,10 @@ reflush:
 
 	local_irq_enable();
 
-	mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq->mutex);
 	if (!--wq->nr_drainers)
 		wq->flags &= ~__WQ_DRAINING;
-	mutex_unlock(&wq_pool_mutex);
+	mutex_unlock(&wq->mutex);
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 