|
@@ -416,7 +416,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
|
|
@@ -547,7 +547,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);

-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
|
|
|
|
|
|
@@ -911,7 +911,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);

-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	cpu_maps_update_done();
|
|
|
|
|