/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
        WORK_STRUCT_LINKED_BIT = 1,     /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC_BIT = 2,     /* static initializer (debugobjects) */
        WORK_STRUCT_COLOR_SHIFT = 3,    /* color for workqueue flushing */
#else
        WORK_STRUCT_COLOR_SHIFT = 2,    /* color for workqueue flushing */
#endif

        WORK_STRUCT_COLOR_BITS = 4,

        WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT,
        WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT,
#else
        WORK_STRUCT_STATIC = 0,
#endif

        /*
         * The last color is no color used for works which don't
         * participate in workqueue flushing.
         */
        WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1,
        WORK_NO_COLOR = WORK_NR_COLORS,

        /* special cpu IDs */
        WORK_CPU_NONE = NR_CPUS,
        WORK_CPU_LAST = WORK_CPU_NONE,

        /*
         * Reserve 6 bits off of cwq pointer w/ debugobjects turned
         * off.  This makes cwqs aligned to 64 bytes which isn't too
         * excessive while allowing 15 workqueue flush colors.
         */
        WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT +
                                WORK_STRUCT_COLOR_BITS,

        WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
        WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
        WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS,

        /* bit mask for work_busy() return values */
        WORK_BUSY_PENDING = 1 << 0,
        WORK_BUSY_RUNNING = 1 << 1,
};

struct work_struct {
        atomic_long_t data;
        struct list_head entry;
        work_func_t func;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()        ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT() \
        ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
        struct work_struct work;
        struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
        return container_of(work, struct delayed_work, work);
}
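
/*
 * Example (illustrative sketch): a work handler has the work_func_t
 * signature above and typically recovers its containing object with
 * container_of(); for a delayed_work item, to_delayed_work() converts
 * the work_struct pointer back to the enclosing delayed_work first.
 * The names (struct my_device, my_poll_fn, poll_work) are hypothetical.
 *
 *      struct my_device {
 *              struct delayed_work poll_work;
 *              int status;
 *      };
 *
 *      static void my_poll_fn(struct work_struct *work)
 *      {
 *              struct delayed_work *dwork = to_delayed_work(work);
 *              struct my_device *dev =
 *                      container_of(dwork, struct my_device, poll_work);
 *
 *              dev->status++;
 *      }
 */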

struct execute_work {
        struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
        .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {                              \
        .data = WORK_DATA_STATIC_INIT(),                        \
        .entry = { &(n).entry, &(n).entry },                    \
        .func = (f),                                            \
        __WORK_INIT_LOCKDEP_MAP(#n, &(n))                       \
        }

#define __DELAYED_WORK_INITIALIZER(n, f) {                      \
        .work = __WORK_INITIALIZER((n).work, (f)),              \
        .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
        }

#define DECLARE_WORK(n, f)                                      \
        struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)                              \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
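
/*
 * Example (illustrative sketch): DECLARE_WORK() / DECLARE_DELAYED_WORK()
 * define statically initialized work items at file scope which can be
 * queued without any runtime INIT_WORK() call.  The names (my_work_fn,
 * my_work, my_timeout_fn, my_timeout_work) are hypothetical.
 *
 *      static void my_work_fn(struct work_struct *work);
 *      static void my_timeout_fn(struct work_struct *work);
 *
 *      static DECLARE_WORK(my_work, my_work_fn);
 *      static DECLARE_DELAYED_WORK(my_timeout_work, my_timeout_fn);
 */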

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)                              \
        do {                                                    \
                (_work)->func = (_func);                        \
        } while (0)

#define PREPARE_DELAYED_WORK(_work, _func)                      \
        PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
        return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)                     \
        do {                                                    \
                static struct lock_class_key __key;             \
                                                                \
                __init_work((_work), _onstack);                 \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
                lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                \
                PREPARE_WORK((_work), (_func));                 \
        } while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)                     \
        do {                                                    \
                __init_work((_work), _onstack);                 \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
                INIT_LIST_HEAD(&(_work)->entry);                \
                PREPARE_WORK((_work), (_func));                 \
        } while (0)
#endif

#define INIT_WORK(_work, _func)                                 \
        do {                                                    \
                __INIT_WORK((_work), (_func), 0);               \
        } while (0)

#define INIT_WORK_ON_STACK(_work, _func)                        \
        do {                                                    \
                __INIT_WORK((_work), (_func), 1);               \
        } while (0)

#define INIT_DELAYED_WORK(_work, _func)                         \
        do {                                                    \
                INIT_WORK(&(_work)->work, (_func));             \
                init_timer(&(_work)->timer);                    \
        } while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)                \
        do {                                                    \
                INIT_WORK_ON_STACK(&(_work)->work, (_func));    \
                init_timer_on_stack(&(_work)->timer);           \
        } while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)              \
        do {                                                    \
                INIT_WORK(&(_work)->work, (_func));             \
                init_timer_deferrable(&(_work)->timer);         \
        } while (0)
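
/*
 * Example (illustrative sketch): work items embedded in dynamically
 * allocated objects are set up at runtime with INIT_WORK() /
 * INIT_DELAYED_WORK() rather than the static DECLARE_*() forms.  The
 * names (struct my_ctx, io_work, retry_work, my_io_fn, my_retry_fn,
 * my_ctx_setup) are hypothetical.
 *
 *      struct my_ctx {
 *              struct work_struct io_work;
 *              struct delayed_work retry_work;
 *      };
 *
 *      static void my_io_fn(struct work_struct *work);
 *      static void my_retry_fn(struct work_struct *work);
 *
 *      static int my_ctx_setup(struct my_ctx *ctx)
 *      {
 *              INIT_WORK(&ctx->io_work, my_io_fn);
 *              INIT_DELAYED_WORK(&ctx->retry_work, my_retry_fn);
 *              return 0;
 *      }
 */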

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
        test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w) \
        work_pending(&(w)->work)
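
/*
 * Example (illustrative sketch): delayed_work_pending() can be used to
 * avoid re-queueing a delayed work item that is already scheduled.  The
 * names (ctx, retry_work) are hypothetical and assume the structure from
 * the earlier example.
 *
 *      if (!delayed_work_pending(&ctx->retry_work))
 *              schedule_delayed_work(&ctx->retry_work, HZ);
 */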

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
        clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

enum {
        WQ_NON_REENTRANT = 1 << 0,      /* guarantee non-reentrance */
        WQ_SINGLE_CPU = 1 << 1,         /* only single cpu at a time */
        WQ_FREEZEABLE = 1 << 2,         /* freeze during suspend */
        WQ_RESCUER = 1 << 3,            /* has a rescue worker */
        WQ_HIGHPRI = 1 << 4,            /* high priority */
        WQ_CPU_INTENSIVE = 1 << 5,      /* cpu intensive workqueue */

        WQ_MAX_ACTIVE = 512,            /* I like 512, better ideas? */
        WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};

/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
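
/*
 * Example (illustrative sketch): short-lived deferred work is usually
 * queued on the shared system workqueue via schedule_work(), which is
 * equivalent to queueing directly on system_wq; long running handlers
 * belong on system_long_wq or a dedicated workqueue instead.  The names
 * my_work and my_long_work are hypothetical.
 *
 *      schedule_work(&my_work);
 *      queue_work(system_wq, &my_work);
 *
 *      queue_work(system_long_wq, &my_long_work);
 */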

extern struct workqueue_struct *
__alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
                      struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(name, flags, max_active)                \
({                                                              \
        static struct lock_class_key __key;                    \
        const char *__lock_name;                                \
                                                                \
        if (__builtin_constant_p(name))                         \
                __lock_name = (name);                           \
        else                                                    \
                __lock_name = #name;                            \
                                                                \
        __alloc_workqueue_key((name), (flags), (max_active),   \
                              &__key, __lock_name);             \
})
#else
#define alloc_workqueue(name, flags, max_active)                \
        __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif

#define create_workqueue(name)                                  \
        alloc_workqueue((name), WQ_RESCUER, 1)
#define create_freezeable_workqueue(name)                       \
        alloc_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_CPU | WQ_RESCUER, 1)
#define create_singlethread_workqueue(name)                     \
        alloc_workqueue((name), WQ_SINGLE_CPU | WQ_RESCUER, 1)
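
/*
 * Example (illustrative sketch): a driver that needs its own queue can
 * allocate one with alloc_workqueue() (or the older create_*() wrappers)
 * and must tear it down with destroy_workqueue().  "my_wq" and the queue
 * name string are hypothetical; max_active of 1 is just an illustrative
 * concurrency limit.
 *
 *      static struct workqueue_struct *my_wq;
 *
 *      my_wq = alloc_workqueue("my_driver", WQ_RESCUER, 1);
 *      if (!my_wq)
 *              return -ENOMEM;
 *
 * and on teardown, after making sure nothing queues new work:
 *
 *      flush_workqueue(my_wq);
 *      destroy_workqueue(my_wq);
 */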

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
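
/*
 * Example (illustrative sketch): the delay argument of
 * queue_delayed_work() and schedule_delayed_work() is in jiffies, so
 * callers typically convert from milliseconds or seconds.  "my_wq" and
 * "retry_work" are hypothetical.
 *
 *      queue_delayed_work(my_wq, &retry_work, msecs_to_jiffies(100));
 *
 * or, on the shared system workqueue, one second from now:
 *
 *      schedule_delayed_work(&retry_work, HZ);
 */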

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
                        unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);
extern int cancel_work_sync(struct work_struct *work);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
                        int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
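
/*
 * Example (illustrative sketch): work_busy() returns a bitmask of the
 * WORK_BUSY_* flags defined above, so a caller can distinguish a work
 * item that is merely queued from one whose handler is executing right
 * now.  "my_work" is hypothetical.
 *
 *      unsigned int busy = work_busy(&my_work);
 *
 *      if (busy & WORK_BUSY_RUNNING)
 *              pr_debug("handler currently running\n");
 *      else if (busy & WORK_BUSY_PENDING)
 *              pr_debug("queued but not yet started\n");
 */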

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself.  Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
        int ret;

        ret = del_timer_sync(&work->timer);
        if (ret)
                work_clear_pending(&work->work);
        return ret;
}

/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
        int ret;

        ret = del_timer(&work->timer);
        if (ret)
                work_clear_pending(&work->work);
        return ret;
}

extern int cancel_delayed_work_sync(struct delayed_work *work);
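
/*
 * Example (illustrative sketch): on teardown of an object owning work
 * items, cancel_work_sync() / cancel_delayed_work_sync() guarantee the
 * handler is neither pending nor running afterwards, which plain
 * cancel_delayed_work() does not.  The names (struct my_ctx, io_work,
 * retry_work, my_ctx_teardown) are hypothetical.
 *
 *      static void my_ctx_teardown(struct my_ctx *ctx)
 *      {
 *              cancel_work_sync(&ctx->io_work);
 *              cancel_delayed_work_sync(&ctx->retry_work);
 *      }
 */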

/* Obsolete.  Use cancel_delayed_work_sync() instead. */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                       struct delayed_work *work)
{
        cancel_delayed_work_sync(work);
}

/* Obsolete.  Use cancel_delayed_work_sync() instead. */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
        cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
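
/*
 * Example (illustrative sketch): work_on_cpu() runs fn(arg) synchronously
 * in process context on the given CPU (on !SMP it simply calls fn
 * directly) and returns fn's return value.  The names read_local_counter
 * and some_counter are hypothetical.
 *
 *      static long read_local_counter(void *arg)
 *      {
 *              return *(long *)arg;
 *      }
 *
 *      long val = work_on_cpu(2, read_local_counter, &some_counter);
 */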

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif