@@ -78,7 +78,7 @@ struct cpu_workqueue_struct {
 	int			nr_in_flight[WORK_NR_COLORS];
 						/* L: nr of in_flight works */
 	int			nr_active;	/* L: nr of active works */
-	int			max_active;	/* I: max active works */
+	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
 };
@@ -108,6 +108,7 @@ struct workqueue_struct {
 	struct list_head	flusher_queue;	/* F: flush waiters */
 	struct list_head	flusher_overflow; /* F: flush overflow list */
 
+	int			saved_max_active; /* I: saved cwq max_active */
 	const char		*name;		/* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
 	struct lockdep_map	lockdep_map;
@@ -228,6 +229,7 @@ static inline void debug_work_deactivate(struct work_struct *work) { }
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
 static DEFINE_PER_CPU(struct ida, worker_ida);
+static bool workqueue_freezing;		/* W: have wqs started freezing? */
 
 static int worker_thread(void *__worker);
 
@@ -745,19 +747,13 @@ static int worker_thread(void *__worker)
 	struct cpu_workqueue_struct *cwq = worker->cwq;
 	DEFINE_WAIT(wait);
 
-	if (cwq->wq->flags & WQ_FREEZEABLE)
-		set_freezable();
-
 	for (;;) {
 		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
-		if (!freezing(current) &&
-		    !kthread_should_stop() &&
+		if (!kthread_should_stop() &&
 		    list_empty(&cwq->worklist))
 			schedule();
 		finish_wait(&cwq->more_work, &wait);
 
-		try_to_freeze();
-
 		if (kthread_should_stop())
 			break;
 
@@ -1553,6 +1549,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 		goto err;
 
 	wq->flags = flags;
+	wq->saved_max_active = max_active;
 	mutex_init(&wq->flush_mutex);
 	atomic_set(&wq->nr_cwqs_to_flush, 0);
 	INIT_LIST_HEAD(&wq->flusher_queue);
@@ -1591,8 +1588,19 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 			failed = true;
 	}
 
+	/*
+	 * workqueue_lock protects global freeze state and workqueues
+	 * list. Grab it, set max_active accordingly and add the new
+	 * workqueue to workqueues list.
+	 */
 	spin_lock(&workqueue_lock);
+
+	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+		for_each_possible_cpu(cpu)
+			get_cwq(cpu, wq)->max_active = 0;
+
 	list_add(&wq->list, &workqueues);
+
 	spin_unlock(&workqueue_lock);
 
 	cpu_maps_update_done();
@@ -1621,14 +1629,18 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
 	int cpu;
 
+	flush_workqueue(wq);
+
+	/*
+	 * wq list is used to freeze wq, remove from list after
+	 * flushing is complete in case freeze races us.
+	 */
 	cpu_maps_update_begin();
 	spin_lock(&workqueue_lock);
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 	cpu_maps_update_done();
 
-	flush_workqueue(wq);
-
 	for_each_possible_cpu(cpu) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 		int i;
@@ -1722,6 +1734,137 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
 EXPORT_SYMBOL_GPL(work_on_cpu);
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_FREEZER
+
+/**
+ * freeze_workqueues_begin - begin freezing workqueues
+ *
+ * Start freezing workqueues. After this function returns, all
+ * freezeable workqueues will queue new works to their frozen_works
+ * list instead of the cwq ones.
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock and cwq->lock's.
+ */
+void freeze_workqueues_begin(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+
+	spin_lock(&workqueue_lock);
+
+	BUG_ON(workqueue_freezing);
+	workqueue_freezing = true;
+
+	for_each_possible_cpu(cpu) {
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			spin_lock_irq(&cwq->lock);
+
+			if (wq->flags & WQ_FREEZEABLE)
+				cwq->max_active = 0;
+
+			spin_unlock_irq(&cwq->lock);
+		}
+	}
+
+	spin_unlock(&workqueue_lock);
+}
+
+/**
+ * freeze_workqueues_busy - are freezeable workqueues still busy?
+ *
+ * Check whether freezing is complete. This function must be called
+ * between freeze_workqueues_begin() and thaw_workqueues().
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock.
+ *
+ * RETURNS:
+ * %true if some freezeable workqueues are still busy. %false if
+ * freezing is complete.
+ */
+bool freeze_workqueues_busy(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+	bool busy = false;
+
+	spin_lock(&workqueue_lock);
+
+	BUG_ON(!workqueue_freezing);
+
+	for_each_possible_cpu(cpu) {
+		/*
+		 * nr_active is monotonically decreasing. It's safe
+		 * to peek without lock.
+		 */
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			if (!(wq->flags & WQ_FREEZEABLE))
+				continue;
+
+			BUG_ON(cwq->nr_active < 0);
+			if (cwq->nr_active) {
+				busy = true;
+				goto out_unlock;
+			}
+		}
+	}
+out_unlock:
+	spin_unlock(&workqueue_lock);
+	return busy;
+}
+
+/**
+ * thaw_workqueues - thaw workqueues
+ *
+ * Thaw workqueues. Normal queueing is restored and all collected
+ * frozen works are transferred to their respective cwq worklists.
+ *
+ * CONTEXT:
+ * Grabs and releases workqueue_lock and cwq->lock's.
+ */
+void thaw_workqueues(void)
+{
+	struct workqueue_struct *wq;
+	unsigned int cpu;
+
+	spin_lock(&workqueue_lock);
+
+	if (!workqueue_freezing)
+		goto out_unlock;
+
+	for_each_possible_cpu(cpu) {
+		list_for_each_entry(wq, &workqueues, list) {
+			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+			if (!(wq->flags & WQ_FREEZEABLE))
+				continue;
+
+			spin_lock_irq(&cwq->lock);
+
+			/* restore max_active and repopulate worklist */
+			cwq->max_active = wq->saved_max_active;
+
+			while (!list_empty(&cwq->delayed_works) &&
+			       cwq->nr_active < cwq->max_active)
+				cwq_activate_first_delayed(cwq);
+
+			wake_up(&cwq->more_work);
+
+			spin_unlock_irq(&cwq->lock);
+		}
+	}
+
+	workqueue_freezing = false;
+out_unlock:
+	spin_unlock(&workqueue_lock);
+}
+#endif /* CONFIG_FREEZER */
+
 void __init init_workqueues(void)
 {
 	unsigned int cpu;