|
@@ -203,7 +203,7 @@ struct workqueue_struct {
|
|
|
cpumask_var_t mayday_mask; /* cpus requesting rescue */
|
|
|
struct worker *rescuer; /* I: rescue worker */
|
|
|
|
|
|
- int saved_max_active; /* I: saved cwq max_active */
|
|
|
+ int saved_max_active; /* W: saved cwq max_active */
|
|
|
const char *name; /* I: workqueue name */
|
|
|
#ifdef CONFIG_LOCKDEP
|
|
|
struct lockdep_map lockdep_map;
|
|
@@ -2675,6 +2675,112 @@ void destroy_workqueue(struct workqueue_struct *wq)
|
|
|
}
|
|
|
EXPORT_SYMBOL_GPL(destroy_workqueue);
|
|
|
|
|
|
+/**
|
|
|
+ * workqueue_set_max_active - adjust max_active of a workqueue
|
|
|
+ * @wq: target workqueue
|
|
|
+ * @max_active: new max_active value.
|
|
|
+ *
|
|
|
+ * Set max_active of @wq to @max_active.
|
|
|
+ *
|
|
|
+ * CONTEXT:
|
|
|
+ * Don't call from IRQ context.
|
|
|
+ */
|
|
|
+void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
|
|
|
+{
|
|
|
+ unsigned int cpu;
|
|
|
+
|
|
|
+ max_active = wq_clamp_max_active(max_active, wq->name);
|
|
|
+
|
|
|
+ spin_lock(&workqueue_lock);
|
|
|
+
|
|
|
+ wq->saved_max_active = max_active;
|
|
|
+
|
|
|
+ for_each_possible_cpu(cpu) {
|
|
|
+ struct global_cwq *gcwq = get_gcwq(cpu);
|
|
|
+
|
|
|
+ spin_lock_irq(&gcwq->lock);
|
|
|
+
|
|
|
+ if (!(wq->flags & WQ_FREEZEABLE) ||
|
|
|
+ !(gcwq->flags & GCWQ_FREEZING))
|
|
|
+ get_cwq(gcwq->cpu, wq)->max_active = max_active;
|
|
|
+
|
|
|
+ spin_unlock_irq(&gcwq->lock);
|
|
|
+ }
|
|
|
+
|
|
|
+ spin_unlock(&workqueue_lock);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(workqueue_set_max_active);
|
|
|
+
|
|
|
+/**
|
|
|
+ * workqueue_congested - test whether a workqueue is congested
|
|
|
+ * @cpu: CPU in question
|
|
|
+ * @wq: target workqueue
|
|
|
+ *
|
|
|
+ * Test whether @wq's cpu workqueue for @cpu is congested. There is
|
|
|
+ * no synchronization around this function and the test result is
|
|
|
+ * unreliable and only useful as advisory hints or for debugging.
|
|
|
+ *
|
|
|
+ * RETURNS:
|
|
|
+ * %true if congested, %false otherwise.
|
|
|
+ */
|
|
|
+bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
|
|
|
+{
|
|
|
+ struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
|
|
|
+
|
|
|
+ return !list_empty(&cwq->delayed_works);
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(workqueue_congested);
|
|
|
+
|
|
|
+/**
|
|
|
+ * work_cpu - return the last known associated cpu for @work
|
|
|
+ * @work: the work of interest
|
|
|
+ *
|
|
|
+ * RETURNS:
|
|
|
+ * CPU number if @work was ever queued. NR_CPUS otherwise.
|
|
|
+ */
|
|
|
+unsigned int work_cpu(struct work_struct *work)
|
|
|
+{
|
|
|
+ struct global_cwq *gcwq = get_work_gcwq(work);
|
|
|
+
|
|
|
+ return gcwq ? gcwq->cpu : NR_CPUS;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(work_cpu);
|
|
|
+
|
|
|
+/**
|
|
|
+ * work_busy - test whether a work is currently pending or running
|
|
|
+ * @work: the work to be tested
|
|
|
+ *
|
|
|
+ * Test whether @work is currently pending or running. There is no
|
|
|
+ * synchronization around this function and the test result is
|
|
|
+ * unreliable and only useful as advisory hints or for debugging.
|
|
|
+ * Especially for reentrant wqs, the pending state might hide the
|
|
|
+ * running state.
|
|
|
+ *
|
|
|
+ * RETURNS:
|
|
|
+ * OR'd bitmask of WORK_BUSY_* bits.
|
|
|
+ */
|
|
|
+unsigned int work_busy(struct work_struct *work)
|
|
|
+{
|
|
|
+ struct global_cwq *gcwq = get_work_gcwq(work);
|
|
|
+ unsigned long flags;
|
|
|
+ unsigned int ret = 0;
|
|
|
+
|
|
|
+ if (!gcwq)
|
|
|
+	return 0;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&gcwq->lock, flags);
|
|
|
+
|
|
|
+ if (work_pending(work))
|
|
|
+ ret |= WORK_BUSY_PENDING;
|
|
|
+ if (find_worker_executing_work(gcwq, work))
|
|
|
+ ret |= WORK_BUSY_RUNNING;
|
|
|
+
|
|
|
+ spin_unlock_irqrestore(&gcwq->lock, flags);
|
|
|
+
|
|
|
+ return ret;
|
|
|
+}
|
|
|
+EXPORT_SYMBOL_GPL(work_busy);
|
|
|
+
|
|
|
/*
|
|
|
* CPU hotplug.
|
|
|
*
|