@@ -11,6 +11,7 @@
 #include <linux/lockdep.h>
 #include <linux/threads.h>
 #include <linux/atomic.h>
+#include <linux/cpumask.h>
 
 struct workqueue_struct;
 
@@ -68,7 +69,7 @@ enum {
 	WORK_STRUCT_COLOR_BITS,
 
 	/* data contains off-queue information when !WORK_STRUCT_PWQ */
-	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,
+	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_COLOR_SHIFT,
 
 	WORK_OFFQ_CANCELING	= (1 << WORK_OFFQ_FLAG_BASE),
@@ -115,6 +116,20 @@ struct delayed_work {
 	int cpu;
 };
 
+/*
+ * A struct for workqueue attributes. This can be used to change
+ * attributes of an unbound workqueue.
+ *
+ * Unlike other fields, ->no_numa isn't a property of a worker_pool. It
+ * only modifies how apply_workqueue_attrs() selects pools and thus doesn't
+ * participate in pool hash calculations or equality comparisons.
+ */
+struct workqueue_attrs {
+	int			nice;		/* nice level */
+	cpumask_var_t		cpumask;	/* allowed CPUs */
+	bool			no_numa;	/* disable NUMA affinity */
+};
+
 static inline struct delayed_work *to_delayed_work(struct work_struct *work)
 {
 	return container_of(work, struct delayed_work, work);
@@ -283,9 +298,10 @@ enum {
 	WQ_MEM_RECLAIM		= 1 << 3,  /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4,  /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5,  /* cpu intensive workqueue */
+	WQ_SYSFS		= 1 << 6,  /* visible in sysfs, see workqueue_sysfs_register() */
 
-	WQ_DRAINING		= 1 << 6,  /* internal: workqueue is draining */
-	WQ_RESCUER		= 1 << 7,  /* internal: workqueue has rescuer */
+	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
+	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
 
 	WQ_MAX_ACTIVE		= 512,	   /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	   /* 4 * #cpus for unbound wq */
@@ -388,7 +404,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #define alloc_ordered_workqueue(fmt, flags, args...)			\
-	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)
+	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 
 #define create_workqueue(name)						\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
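For illustration: an ordered workqueue executes at most one work item at a time, in queueing order, and the macro now requests that explicitly via __WQ_ORDERED instead of relying on max_active == 1 alone. A minimal usage sketch (the "my_flush" name and init function are illustrative, not part of this patch):

	static struct workqueue_struct *my_flush_wq;

	static int __init my_init(void)
	{
		/* one work item at a time, FIFO, usable for memory reclaim */
		my_flush_wq = alloc_ordered_workqueue("my_flush", WQ_MEM_RECLAIM);
		if (!my_flush_wq)
			return -ENOMEM;
		return 0;
	}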
@@ -399,30 +415,23 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
+struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask);
+void free_workqueue_attrs(struct workqueue_attrs *attrs);
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+			  const struct workqueue_attrs *attrs);
+
 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
 			  struct work_struct *work);
-extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			  struct delayed_work *work, unsigned long delay);
-extern bool queue_delayed_work(struct workqueue_struct *wq,
-			  struct delayed_work *work, unsigned long delay);
 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			  struct delayed_work *dwork, unsigned long delay);
-extern bool mod_delayed_work(struct workqueue_struct *wq,
-			  struct delayed_work *dwork, unsigned long delay);
 
 extern void flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 extern void flush_scheduled_work(void);
 
-extern bool schedule_work_on(int cpu, struct work_struct *work);
-extern bool schedule_work(struct work_struct *work);
-extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
-				     unsigned long delay);
-extern bool schedule_delayed_work(struct delayed_work *work,
-				  unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern int keventd_up(void);
 
 int execute_in_process_context(work_func_t fn, struct execute_work *);
 
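Together with struct workqueue_attrs above, these calls let a caller retune an unbound workqueue at runtime. A hedged sketch of the calling pattern, assuming the workqueue was created with WQ_UNBOUND (function and variable names are illustrative):

	static int retune_unbound_wq(struct workqueue_struct *wq)
	{
		struct workqueue_attrs *attrs;
		int ret;

		attrs = alloc_workqueue_attrs(GFP_KERNEL);
		if (!attrs)
			return -ENOMEM;

		attrs->nice = -5;				/* higher-priority workers */
		cpumask_copy(attrs->cpumask, cpumask_of(1));	/* restrict to CPU 1 */

		ret = apply_workqueue_attrs(wq, attrs);		/* reshape backing pools */
		free_workqueue_attrs(attrs);
		return ret;
	}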
@@ -435,9 +444,121 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
 
 extern void workqueue_set_max_active(struct workqueue_struct *wq,
 				     int max_active);
-extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
+extern bool current_is_workqueue_rescuer(void);
+extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
 
+/**
+ * queue_work - queue work on a workqueue
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns %false if @work was already on a queue, %true otherwise.
+ *
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
+ */
+static inline bool queue_work(struct workqueue_struct *wq,
+			      struct work_struct *work)
+{
+	return queue_work_on(WORK_CPU_UNBOUND, wq, work);
+}
+
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @dwork: delayable work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Equivalent to queue_delayed_work_on() but tries to use the local CPU.
+ */
+static inline bool queue_delayed_work(struct workqueue_struct *wq,
+				      struct delayed_work *dwork,
+				      unsigned long delay)
+{
+	return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+
+/**
+ * mod_delayed_work - modify delay of or queue a delayed work
+ * @wq: workqueue to use
+ * @dwork: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Equivalent to mod_delayed_work_on() but tries to use the local CPU.
+ */
+static inline bool mod_delayed_work(struct workqueue_struct *wq,
+				    struct delayed_work *dwork,
+				    unsigned long delay)
+{
+	return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
+}
+
+/**
+ * schedule_work_on - put work task on a specific cpu
+ * @cpu: cpu to put the work task on
+ * @work: job to be done
+ *
+ * This puts a job on a specific cpu.
+ */
+static inline bool schedule_work_on(int cpu, struct work_struct *work)
+{
+	return queue_work_on(cpu, system_wq, work);
+}
+
+/**
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
+ *
+ * Returns %false if @work was already on the kernel-global workqueue and
+ * %true otherwise.
+ *
+ * This puts a job in the kernel-global workqueue if it was not already
+ * queued and leaves it in the same position on the kernel-global
+ * workqueue otherwise.
+ */
+static inline bool schedule_work(struct work_struct *work)
+{
+	return queue_work(system_wq, work);
+}
+
+/**
+ * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
+ * @cpu: cpu to use
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue on the specified CPU.
+ */
+static inline bool schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
+					    unsigned long delay)
+{
+	return queue_delayed_work_on(cpu, system_wq, dwork, delay);
+}
+
+/**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @dwork: job to be done
+ * @delay: number of jiffies to wait or 0 for immediate execution
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
+static inline bool schedule_delayed_work(struct delayed_work *dwork,
+					 unsigned long delay)
+{
+	return queue_delayed_work(system_wq, dwork, delay);
+}
+
+/**
+ * keventd_up - is workqueue initialized yet?
+ */
+static inline bool keventd_up(void)
+{
+	return system_wq != NULL;
+}
+
 /*
  * Like above, but uses del_timer() instead of del_timer_sync(). This means,
  * if it returns 0 the timer function may be running and the queueing is in
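The block above converts queue_work(), the schedule_*() family and keventd_up() from out-of-line exports into static inline wrappers around the *_on() primitives, with WORK_CPU_UNBOUND and system_wq filled in; callers are unaffected. For example (names hypothetical):

	static void my_work_fn(struct work_struct *work)
	{
		/* runs later, in process context, on a system_wq worker */
	}
	static DECLARE_WORK(my_work, my_work_fn);
	static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

	static void kick(void)
	{
		schedule_work(&my_work);		/* queue_work(system_wq, ...) */
		schedule_delayed_work(&my_dwork, HZ);	/* roughly one second from now */
	}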
@@ -466,12 +587,12 @@ static inline bool __deprecated flush_delayed_work_sync(struct delayed_work *dwo
 }
 
 #ifndef CONFIG_SMP
-static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
 	return fn(arg);
 }
 #else
-long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
+long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_FREEZER
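work_on_cpu() now takes the CPU as a plain int, matching queue_work_on() and friends. Behavior is unchanged and synchronous: on SMP the callback runs on the requested CPU and its return value is passed back. A sketch (the callback is hypothetical):

	static long where_did_i_run(void *unused)
	{
		return raw_smp_processor_id();	/* executes on the target CPU */
	}

	static void example(void)
	{
		/* blocks until the callback has run on CPU 2 */
		pr_info("ran on cpu %ld\n", work_on_cpu(2, where_did_i_run, NULL));
	}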
@@ -480,4 +601,11 @@ extern bool freeze_workqueues_busy(void);
 extern void thaw_workqueues(void);
 #endif /* CONFIG_FREEZER */
 
+#ifdef CONFIG_SYSFS
+int workqueue_sysfs_register(struct workqueue_struct *wq);
+#else /* CONFIG_SYSFS */
+static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
+{ return 0; }
+#endif /* CONFIG_SYSFS */
+
 #endif
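A workqueue becomes visible in sysfs either by passing WQ_SYSFS at creation or by calling the new workqueue_sysfs_register() afterwards; for unbound workqueues the attributes above (nice level, cpumask) can then be adjusted from userspace. A hedged sketch, with illustrative names:

	static struct workqueue_struct *my_wq;

	static int my_setup(struct workqueue_struct *other_wq)
	{
		/* visible in sysfs from creation; unbound knobs become writable */
		my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_SYSFS, 0);
		if (!my_wq)
			return -ENOMEM;

		/* or expose a workqueue that was created without WQ_SYSFS */
		return workqueue_sysfs_register(other_wq);
	}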