workqueue.h

/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/threads.h>
#include <linux/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

void delayed_work_timer_fn(unsigned long __data);
/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
enum {
	WORK_STRUCT_PENDING_BIT	= 0,	/* work item is pending execution */
	WORK_STRUCT_DELAYED_BIT	= 1,	/* work item is delayed */
	WORK_STRUCT_CWQ_BIT	= 2,	/* data points to cwq */
	WORK_STRUCT_LINKED_BIT	= 3,	/* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC_BIT	= 4,	/* static initializer (debugobjects) */
	WORK_STRUCT_COLOR_SHIFT	= 5,	/* color for workqueue flushing */
#else
	WORK_STRUCT_COLOR_SHIFT	= 4,	/* color for workqueue flushing */
#endif

	WORK_STRUCT_COLOR_BITS	= 4,

	WORK_STRUCT_PENDING	= 1 << WORK_STRUCT_PENDING_BIT,
	WORK_STRUCT_DELAYED	= 1 << WORK_STRUCT_DELAYED_BIT,
	WORK_STRUCT_CWQ		= 1 << WORK_STRUCT_CWQ_BIT,
	WORK_STRUCT_LINKED	= 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
	WORK_STRUCT_STATIC	= 1 << WORK_STRUCT_STATIC_BIT,
#else
	WORK_STRUCT_STATIC	= 0,
#endif
	/*
	 * The last color is reserved as "no color", used for work items
	 * which don't participate in workqueue flushing.
	 */
	WORK_NR_COLORS		= (1 << WORK_STRUCT_COLOR_BITS) - 1,
	WORK_NO_COLOR		= WORK_NR_COLORS,
	/* special cpu IDs */
	WORK_CPU_UNBOUND	= NR_CPUS,
	WORK_CPU_NONE		= NR_CPUS + 1,
	WORK_CPU_LAST		= WORK_CPU_NONE,
	/*
	 * Reserve 8 bits off of the cwq pointer w/ debugobjects turned
	 * off.  This makes cwqs aligned to 256 bytes and allows 15
	 * workqueue flush colors.
	 */
	WORK_STRUCT_FLAG_BITS	= WORK_STRUCT_COLOR_SHIFT +
				  WORK_STRUCT_COLOR_BITS,
	/* data contains off-queue information when !WORK_STRUCT_CWQ */
	WORK_OFFQ_FLAG_BASE	= WORK_STRUCT_FLAG_BITS,

	WORK_OFFQ_FLAG_BITS	= 0,
	WORK_OFFQ_CPU_SHIFT	= WORK_OFFQ_FLAG_BASE + WORK_OFFQ_FLAG_BITS,

	/* convenience constants */
	WORK_STRUCT_FLAG_MASK	= (1UL << WORK_STRUCT_FLAG_BITS) - 1,
	WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
	WORK_STRUCT_NO_CPU	= (unsigned long)WORK_CPU_NONE << WORK_OFFQ_CPU_SHIFT,

	/* bit mask for work_busy() return values */
	WORK_BUSY_PENDING	= 1 << 0,
	WORK_BUSY_RUNNING	= 1 << 1,
};
struct work_struct {
	atomic_long_t data;
	struct list_head entry;
	work_func_t func;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU)
#define WORK_DATA_STATIC_INIT()	\
	ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC)

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
	return container_of(work, struct delayed_work, work);
}
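
/*
 * Example sketch: a work handler only receives a pointer to the embedded
 * work_struct, so code that needs the containing object typically combines
 * to_delayed_work() with container_of().  The names my_device and
 * my_dwork_fn below are hypothetical, not part of this header.
 *
 *	struct my_device {
 *		struct delayed_work	dwork;
 *		int			state;
 *	};
 *
 *	static void my_dwork_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_device *dev = container_of(dwork, struct my_device,
 *						     dwork);
 *
 *		dev->state++;
 *	}
 */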
struct execute_work {
	struct work_struct work;
};
#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {				\
	.data = WORK_DATA_STATIC_INIT(),			\
	.entry	= { &(n).entry, &(n).entry },			\
	.func = (f),						\
	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
	}

#define __DELAYED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_INITIALIZER(delayed_work_timer_fn,	\
				   0, (unsigned long)&(n)),	\
	}

#define __DEFERRED_WORK_INITIALIZER(n, f) {			\
	.work = __WORK_INITIALIZER((n).work, (f)),		\
	.timer = TIMER_DEFERRED_INITIALIZER(delayed_work_timer_fn, \
					    0, (unsigned long)&(n)), \
	}

#define DECLARE_WORK(n, f)					\
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)				\
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)

#define DECLARE_DEFERRED_WORK(n, f)				\
	struct delayed_work n = __DEFERRED_WORK_INITIALIZER(n, f)
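
/*
 * Example sketch: statically declared work items can be queued directly.
 * The handler my_work_fn and the item my_work below are made-up names used
 * purely for illustration.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("my_work ran\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 * Queue it on the system workqueue and, if needed, wait for it later:
 *
 *	schedule_work(&my_work);
 *	...
 *	flush_work(&my_work);
 */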
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)				\
	do {							\
		(_work)->func = (_func);			\
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func)			\
	PREPARE_WORK(&(_work)->work, (_func))
#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
	return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif
/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		static struct lock_class_key __key;			\
									\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)				\
	do {								\
		__init_work((_work), _onstack);				\
		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
		INIT_LIST_HEAD(&(_work)->entry);			\
		PREPARE_WORK((_work), (_func));				\
	} while (0)
#endif

#define INIT_WORK(_work, _func)					\
	do {							\
		__INIT_WORK((_work), (_func), 0);		\
	} while (0)

#define INIT_WORK_ONSTACK(_work, _func)				\
	do {							\
		__INIT_WORK((_work), (_func), 1);		\
	} while (0)

#define INIT_DELAYED_WORK(_work, _func)				\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer(&(_work)->timer);			\
		(_work)->timer.function = delayed_work_timer_fn;\
		(_work)->timer.data = (unsigned long)(_work);	\
	} while (0)

#define INIT_DELAYED_WORK_ONSTACK(_work, _func)			\
	do {							\
		INIT_WORK_ONSTACK(&(_work)->work, (_func));	\
		init_timer_on_stack(&(_work)->timer);		\
		(_work)->timer.function = delayed_work_timer_fn;\
		(_work)->timer.data = (unsigned long)(_work);	\
	} while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
	do {							\
		INIT_WORK(&(_work)->work, (_func));		\
		init_timer_deferrable(&(_work)->timer);		\
		(_work)->timer.function = delayed_work_timer_fn;\
		(_work)->timer.data = (unsigned long)(_work);	\
	} while (0)
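
/*
 * Example sketch: work items embedded in dynamically allocated objects are
 * initialized at runtime with INIT_WORK()/INIT_DELAYED_WORK() before being
 * queued.  my_device and the my_*_fn handlers are made-up names reused from
 * the earlier sketch.
 *
 *	struct my_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	INIT_DELAYED_WORK(&dev->dwork, my_dwork_fn);
 *	schedule_delayed_work(&dev->dwork, msecs_to_jiffies(100));
 */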
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The delayable work item in question
 */
#define delayed_work_pending(w) \
	work_pending(&(w)->work)
/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work) \
	clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))
/*
 * Workqueue flags and constants.  For details, please refer to
 * Documentation/workqueue.txt.
 */
enum {
	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
	WQ_HIGHPRI		= 1 << 4, /* high priority */
	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu intensive workqueue */
	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */

	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
};
/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
#define WQ_UNBOUND_MAX_ACTIVE	\
	max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
/*
 * System-wide workqueues which are always present.
 *
 * system_wq is the one used by schedule[_delayed]_work[_on]().
 * Multi-CPU multi-threaded.  There are users which expect relatively
 * short queue flush time.  Don't queue works which can run for too
 * long.
 *
 * system_long_wq is similar to system_wq but may host long running
 * works.  Queue flushing might take relatively long.
 *
 * system_nrt_wq is non-reentrant and guarantees that any given work
 * item is never executed in parallel by multiple CPUs.  Queue
 * flushing might take relatively long.
 *
 * system_unbound_wq is an unbound workqueue.  Workers are not bound to
 * any specific CPU, not concurrency managed, and all queued works are
 * executed immediately as long as the max_active limit is not reached
 * and resources are available.
 *
 * system_freezable_wq is equivalent to system_wq except that it's
 * freezable.
 *
 * system_nrt_freezable_wq is equivalent to system_nrt_wq except that
 * it's freezable.
 */
extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_nrt_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_nrt_freezable_wq;
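
/*
 * Example sketch: most users don't allocate their own workqueue and instead
 * queue onto one of the system workqueues above.  long_job and
 * long_running_fn are made-up names.
 *
 *	static DECLARE_WORK(long_job, long_running_fn);
 *
 *	queue_work(system_long_wq, &long_job);
 *
 * schedule_work(&long_job) would be the equivalent shorthand for queueing
 * onto system_wq.
 */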
extern struct workqueue_struct *
__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
	struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6);
/**
 * alloc_workqueue - allocate a workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags
 * @max_active: max in-flight work items, 0 for default
 * @args: args for @fmt
 *
 * Allocate a workqueue with the specified parameters.  For detailed
 * information on WQ_* flags, please refer to Documentation/workqueue.txt.
 *
 * The __lock_name macro dance is to guarantee that a single lock_class_key
 * doesn't end up with different names, which isn't allowed by lockdep.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#ifdef CONFIG_LOCKDEP
#define alloc_workqueue(fmt, flags, max_active, args...)	\
({								\
	static struct lock_class_key __key;			\
	const char *__lock_name;				\
								\
	if (__builtin_constant_p(fmt))				\
		__lock_name = (fmt);				\
	else							\
		__lock_name = #fmt;				\
								\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      &__key, __lock_name, ##args);	\
})
#else
#define alloc_workqueue(fmt, flags, max_active, args...)	\
	__alloc_workqueue_key((fmt), (flags), (max_active),	\
			      NULL, NULL, ##args)
#endif
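
/*
 * Example sketch: a driver-private workqueue with the default max_active
 * that can make forward progress under memory pressure.  The name "my_wq"
 * and the item my_work are made up, and error handling is minimal.
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *
 *	queue_work(wq, &my_work);
 *	...
 *	destroy_workqueue(wq);
 */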
/**
 * alloc_ordered_workqueue - allocate an ordered workqueue
 * @fmt: printf format for the name of the workqueue
 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
 * @args: args for @fmt
 *
 * Allocate an ordered workqueue.  An ordered workqueue executes at
 * most one work item at any given time in the queued order.  They are
 * implemented as unbound workqueues with @max_active of one.
 *
 * RETURNS:
 * Pointer to the allocated workqueue on success, %NULL on failure.
 */
#define alloc_ordered_workqueue(fmt, flags, args...)		\
	alloc_workqueue(fmt, WQ_UNBOUND | (flags), 1, ##args)

#define create_workqueue(name)					\
	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
#define create_freezable_workqueue(name)			\
	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
#define create_singlethread_workqueue(name)			\
	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
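
/*
 * Example sketch: when work items must never run concurrently and must
 * execute in queueing order, an ordered workqueue is the usual choice.
 * The name "my_ordered" is made up.
 *
 *	struct workqueue_struct *ordered_wq;
 *
 *	ordered_wq = alloc_ordered_workqueue("my_ordered", WQ_FREEZABLE);
 *	if (!ordered_wq)
 *		return -ENOMEM;
 */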
extern void destroy_workqueue(struct workqueue_struct *wq);

extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
			struct work_struct *work);
extern bool queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);
extern bool queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);

extern bool schedule_work_on(int cpu, struct work_struct *work);
extern bool schedule_work(struct work_struct *work);
extern bool schedule_delayed_work_on(int cpu, struct delayed_work *work,
				     unsigned long delay);
extern bool schedule_delayed_work(struct delayed_work *work,
				  unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern int keventd_up(void);

int execute_in_process_context(work_func_t fn, struct execute_work *);

extern bool flush_work(struct work_struct *work);
extern bool flush_work_sync(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);

extern bool flush_delayed_work(struct delayed_work *dwork);
extern bool flush_delayed_work_sync(struct delayed_work *work);
extern bool cancel_delayed_work_sync(struct delayed_work *dwork);

extern void workqueue_set_max_active(struct workqueue_struct *wq,
				     int max_active);
extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq);
extern unsigned int work_cpu(struct work_struct *work);
extern unsigned int work_busy(struct work_struct *work);
/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself.  Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline bool cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
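
/*
 * Example sketch: cancel_delayed_work() only stops the timer, so if the
 * callback might already be running, wait for it as the comment above
 * suggests (or use cancel_delayed_work_sync() to do both in one call).
 * dev->dwork is a made-up field.
 *
 *	cancel_delayed_work(&dev->dwork);
 *	cancel_work_sync(&dev->dwork.work);
 */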
/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline bool __cancel_delayed_work(struct delayed_work *work)
{
	bool ret;

	ret = del_timer(&work->timer);
	if (ret)
		work_clear_pending(&work->work);
	return ret;
}
#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */

#ifdef CONFIG_FREEZER
extern void freeze_workqueues_begin(void);
extern bool freeze_workqueues_busy(void);
extern void thaw_workqueues(void);
#endif /* CONFIG_FREEZER */

#endif /* _LINUX_WORKQUEUE_H */