@@ -3962,7 +3962,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 	struct pool_workqueue *pwq;
 	bool ret;
 
-	preempt_disable();
+	rcu_read_lock_sched();
 
 	if (!(wq->flags & WQ_UNBOUND))
 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
@@ -3970,7 +3970,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 		pwq = first_pwq(wq);
 
 	ret = !list_empty(&pwq->delayed_works);
-	preempt_enable();
+	rcu_read_unlock_sched();
 
 	return ret;
 }
@@ -4354,16 +4354,16 @@ bool freeze_workqueues_busy(void)
 		 * nr_active is monotonically decreasing.  It's safe
 		 * to peek without lock.
 		 */
-		preempt_disable();
+		rcu_read_lock_sched();
 		for_each_pwq(pwq, wq) {
			WARN_ON_ONCE(pwq->nr_active < 0);
			if (pwq->nr_active) {
				busy = true;
-				preempt_enable();
+				rcu_read_unlock_sched();
				goto out_unlock;
			}
		}
-		preempt_enable();
+		rcu_read_unlock_sched();
 	}
 out_unlock:
	mutex_unlock(&wq_mutex);
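
For reference, the conversion above is the standard sched-RCU read-side
pattern: preempt_disable() already implies sched-RCU protection, but
rcu_read_lock_sched() documents the intent and lets lockdep and the RCU
debug checks validate the critical section. Below is a minimal sketch of
that pattern, not part of the patch itself; the example_* names are
hypothetical and used only for illustration.

	#include <linux/rcupdate.h>

	struct example_struct {
		int value;
	};

	static struct example_struct __rcu *example_ptr;

	static int example_read(void)
	{
		struct example_struct *p;
		int val = 0;

		/* begin sched-RCU read-side critical section */
		rcu_read_lock_sched();
		p = rcu_dereference_sched(example_ptr);
		if (p)
			val = p->value;
		rcu_read_unlock_sched();	/* pairs with rcu_read_lock_sched() */

		return val;
	}

An updater would publish a replacement with rcu_assign_pointer() and wait
for all such readers with synchronize_sched() before freeing the old
structure.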