@@ -7,6 +7,43 @@
 #ifndef _KERNEL_WORKQUEUE_INTERNAL_H
 #define _KERNEL_WORKQUEUE_INTERNAL_H
 
+#include <linux/workqueue.h>
+
+struct global_cwq;
+struct worker_pool;
+
+/*
+ * The poor guys doing the actual heavy lifting.  All on-duty workers are
+ * either serving the manager role, on idle list or on busy hash.  For
+ * details on the locking annotation (L, I, X...), refer to workqueue.c.
+ *
+ * Only to be used in workqueue and async.
+ */
+struct worker {
+	/* on idle list while idle, on busy hash table while busy */
+	union {
+		struct list_head	entry;	/* L: while idle */
+		struct hlist_node	hentry;	/* L: while busy */
+	};
+
+	struct work_struct	*current_work;	/* L: work being processed */
+	work_func_t		current_func;	/* L: current_work's fn */
+	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
+	struct list_head	scheduled;	/* L: scheduled works */
+	struct task_struct	*task;		/* I: worker task */
+	struct worker_pool	*pool;		/* I: the associated pool */
+						/* 64 bytes boundary on 64bit, 32 on 32bit */
+	unsigned long		last_active;	/* L: last active timestamp */
+	unsigned int		flags;		/* X: flags */
+	int			id;		/* I: worker id */
+
+	/* for rebinding worker to CPU */
+	struct work_struct	rebind_work;	/* L: for busy worker */
+
+	/* used only by rescuers to point to the target workqueue */
+	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
+};
+
 /*
  * Scheduler hooks for concurrency managed workqueue.  Only to be used from
  * sched.c and workqueue.c.
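
The entry/hentry union is the detail worth noting: a worker is either on its pool's idle list or in the busy hash, never both, so the two node types can share storage. A minimal sketch of the idle-to-busy transition, assuming a pool that carries an idle list and a busy hash keyed by the work item (the struct and helper below are hypothetical stand-ins; the real bookkeeping lives in workqueue.c):

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/workqueue.h>

/* hypothetical stand-in for the relevant worker_pool fields */
struct sketch_pool {
	struct list_head	idle_list;	/* idle workers, linked via ->entry */
	DECLARE_HASHTABLE(busy_hash, 6);	/* busy workers, linked via ->hentry */
};

/* sketch only: a worker leaves the idle list and enters the busy hash */
static void sketch_worker_start(struct sketch_pool *pool, struct worker *worker,
				struct work_struct *work)
{
	/* while idle, ->entry links the worker on pool->idle_list */
	list_del_init(&worker->entry);

	/* while busy, the same storage is reused as ->hentry, keyed by the work */
	worker->current_work = work;
	worker->current_func = work->func;
	hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
}

Keying the hash by the work pointer is what lets code ask "is somebody already executing this work?" with a single lookup, which is why current_work and hentry are updated together.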