@@ -3358,19 +3358,21 @@ EXPORT_SYMBOL_GPL(work_busy);
  */
 
 /* claim manager positions of all pools */
-static void gcwq_claim_management(struct global_cwq *gcwq)
+static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
 {
 	struct worker_pool *pool;
 
 	for_each_worker_pool(pool, gcwq)
 		mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
+	spin_lock_irq(&gcwq->lock);
 }
 
 /* release manager positions */
-static void gcwq_release_management(struct global_cwq *gcwq)
+static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
 {
 	struct worker_pool *pool;
 
+	spin_unlock_irq(&gcwq->lock);
 	for_each_worker_pool(pool, gcwq)
 		mutex_unlock(&pool->manager_mutex);
 }
@@ -3385,8 +3387,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
 	BUG_ON(gcwq->cpu != smp_processor_id());
 
-	gcwq_claim_management(gcwq);
-	spin_lock_irq(&gcwq->lock);
+	gcwq_claim_management_and_lock(gcwq);
 
 	/*
 	 * We've claimed all manager positions.  Make all workers unbound
@@ -3403,8 +3404,7 @@ static void gcwq_unbind_fn(struct work_struct *work)
 
 	gcwq->flags |= GCWQ_DISASSOCIATED;
 
-	spin_unlock_irq(&gcwq->lock);
-	gcwq_release_management(gcwq);
+	gcwq_release_management_and_unlock(gcwq);
 
 	/*
 	 * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3428,26 +3428,19 @@ static void gcwq_unbind_fn(struct work_struct *work)
 		atomic_set(get_pool_nr_running(pool), 0);
 }
 
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action,
-					    void *hcpu)
+/*
+ * Workqueues should be brought up before normal priority CPU notifiers.
+ * This will be registered high priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+					       unsigned long action,
+					       void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct global_cwq *gcwq = get_gcwq(cpu);
 	struct worker_pool *pool;
-	struct work_struct unbind_work;
-	unsigned long flags;
-
-	action &= ~CPU_TASKS_FROZEN;
-
-	switch (action) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
-		schedule_work_on(cpu, &unbind_work);
-		flush_work(&unbind_work);
-		break;
 
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_UP_PREPARE:
 		for_each_worker_pool(pool, gcwq) {
 			struct worker *worker;
@@ -3463,45 +3456,16 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			start_worker(worker);
 			spin_unlock_irq(&gcwq->lock);
 		}
-	}
-
-	/* some are called w/ irq disabled, don't disturb irq status */
-	spin_lock_irqsave(&gcwq->lock, flags);
+		break;
 
-	switch (action) {
 	case CPU_DOWN_FAILED:
 	case CPU_ONLINE:
-		spin_unlock_irq(&gcwq->lock);
-		gcwq_claim_management(gcwq);
-		spin_lock_irq(&gcwq->lock);
-
+		gcwq_claim_management_and_lock(gcwq);
 		gcwq->flags &= ~GCWQ_DISASSOCIATED;
-
 		rebind_workers(gcwq);
-
-		gcwq_release_management(gcwq);
+		gcwq_release_management_and_unlock(gcwq);
 		break;
 	}
-
-	spin_unlock_irqrestore(&gcwq->lock, flags);
-
-	return notifier_from_errno(0);
-}
-
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		return workqueue_cpu_callback(nfb, action, hcpu);
-	}
 	return NOTIFY_OK;
 }
 
@@ -3513,9 +3477,16 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 						 unsigned long action,
 						 void *hcpu)
 {
+	unsigned int cpu = (unsigned long)hcpu;
+	struct work_struct unbind_work;
+
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
-		return workqueue_cpu_callback(nfb, action, hcpu);
+		/* unbinding should happen on the local CPU */
+		INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+		schedule_work_on(cpu, &unbind_work);
+		flush_work(&unbind_work);
+		break;
 	}
 	return NOTIFY_OK;
 }
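
Note (not part of the patch): a minimal userspace sketch of the locking order that
gcwq_claim_management_and_lock() / gcwq_release_management_and_unlock() fold together
above: take every per-pool manager mutex in a fixed order, then the shared lock, and
release in the reverse order. All names here (fake_gcwq, fake_pool, NR_POOLS,
claim_management_and_lock, ...) are invented for illustration, and a plain pthread
spinlock stands in for spin_lock_irq() since userspace cannot disable IRQs. Builds
with: cc -pthread sketch.c

/* Illustrative analogue only -- not kernel code and not the author's API. */
#include <pthread.h>
#include <stdio.h>

#define NR_POOLS 2	/* stand-in for the normal and highpri worker pools */

struct fake_pool {
	pthread_mutex_t manager_mutex;
};

struct fake_gcwq {
	pthread_spinlock_t lock;		/* stand-in for gcwq->lock */
	struct fake_pool pools[NR_POOLS];
};

static void claim_management_and_lock(struct fake_gcwq *gcwq)
{
	/* claim every pool's manager position in a fixed order, then the lock */
	for (int i = 0; i < NR_POOLS; i++)
		pthread_mutex_lock(&gcwq->pools[i].manager_mutex);
	pthread_spin_lock(&gcwq->lock);
}

static void release_management_and_unlock(struct fake_gcwq *gcwq)
{
	/* drop the inner lock before the outer mutexes, mirroring the patch */
	pthread_spin_unlock(&gcwq->lock);
	for (int i = 0; i < NR_POOLS; i++)
		pthread_mutex_unlock(&gcwq->pools[i].manager_mutex);
}

int main(void)
{
	struct fake_gcwq gcwq;

	pthread_spin_init(&gcwq.lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < NR_POOLS; i++)
		pthread_mutex_init(&gcwq.pools[i].manager_mutex, NULL);

	claim_management_and_lock(&gcwq);
	printf("all manager positions claimed, shared lock held\n");
	release_management_and_unlock(&gcwq);
	return 0;
}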