workqueue.h

/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <asm/atomic.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one.
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

enum {
        WORK_STRUCT_PENDING_BIT = 0,    /* work item is pending execution */
        WORK_STRUCT_LINKED_BIT  = 1,    /* next work is linked to this one */
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC_BIT  = 2,    /* static initializer (debugobjects) */
        WORK_STRUCT_COLOR_SHIFT = 3,    /* color for workqueue flushing */
#else
        WORK_STRUCT_COLOR_SHIFT = 2,    /* color for workqueue flushing */
#endif

        WORK_STRUCT_COLOR_BITS  = 4,

        WORK_STRUCT_PENDING     = 1 << WORK_STRUCT_PENDING_BIT,
        WORK_STRUCT_LINKED      = 1 << WORK_STRUCT_LINKED_BIT,
#ifdef CONFIG_DEBUG_OBJECTS_WORK
        WORK_STRUCT_STATIC      = 1 << WORK_STRUCT_STATIC_BIT,
#else
        WORK_STRUCT_STATIC      = 0,
#endif

        /*
         * The last color is WORK_NO_COLOR, used for work items which
         * don't participate in workqueue flushing.
         */
        WORK_NR_COLORS          = (1 << WORK_STRUCT_COLOR_BITS) - 1,
        WORK_NO_COLOR           = WORK_NR_COLORS,

        /*
         * Reserve 6 bits off of cwq pointer w/ debugobjects turned
         * off.  This makes cwqs aligned to 64 bytes, which isn't too
         * excessive while allowing 15 workqueue flush colors.
         */
        WORK_STRUCT_FLAG_BITS   = WORK_STRUCT_COLOR_SHIFT +
                                  WORK_STRUCT_COLOR_BITS,

        WORK_STRUCT_FLAG_MASK   = (1UL << WORK_STRUCT_FLAG_BITS) - 1,
        WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK,
};
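
/*
 * Illustrative sketch (not part of the original header): under the
 * layout above, the low WORK_STRUCT_FLAG_BITS bits of ->data hold the
 * flags and flush color, and the remaining high bits hold the cwq
 * pointer, so the two halves would be decoded like this:
 *
 *      unsigned long data  = *work_data_bits(work);
 *      unsigned long flags = data & WORK_STRUCT_FLAG_MASK;
 *      void *cwq           = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 */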

struct work_struct {
        atomic_long_t data;
        struct list_head entry;
        work_func_t func;
#ifdef CONFIG_LOCKDEP
        struct lockdep_map lockdep_map;
#endif
};

#define WORK_DATA_INIT()        ATOMIC_LONG_INIT(0)
#define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_STATIC)

struct delayed_work {
        struct work_struct work;
        struct timer_list timer;
};

static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
        return container_of(work, struct delayed_work, work);
}
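
/*
 * Illustrative sketch (my_handler is a hypothetical name): a delayed
 * work item's handler receives the embedded work_struct, so
 * to_delayed_work() is how the handler gets back to its container:
 *
 *      static void my_handler(struct work_struct *work)
 *      {
 *              struct delayed_work *dwork = to_delayed_work(work);
 *              ...
 *      }
 */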

struct execute_work {
        struct work_struct work;
};

#ifdef CONFIG_LOCKDEP
/*
 * NB: because we have to copy the lockdep_map, setting _key
 * here is required, otherwise it could get initialised to the
 * copy of the lockdep_map!
 */
#define __WORK_INIT_LOCKDEP_MAP(n, k) \
        .lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
#else
#define __WORK_INIT_LOCKDEP_MAP(n, k)
#endif

#define __WORK_INITIALIZER(n, f) {                              \
        .data = WORK_DATA_STATIC_INIT(),                        \
        .entry = { &(n).entry, &(n).entry },                    \
        .func = (f),                                            \
        __WORK_INIT_LOCKDEP_MAP(#n, &(n))                       \
        }

#define __DELAYED_WORK_INITIALIZER(n, f) {                      \
        .work = __WORK_INITIALIZER((n).work, (f)),              \
        .timer = TIMER_INITIALIZER(NULL, 0, 0),                 \
        }

#define DECLARE_WORK(n, f)                                      \
        struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_DELAYED_WORK(n, f)                              \
        struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
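
/*
 * Illustrative sketch (my_work and my_work_fn are hypothetical names):
 * a compile-time work item declared with DECLARE_WORK and later kicked
 * with schedule_work():
 *
 *      static void my_work_fn(struct work_struct *unused)
 *      {
 *              pr_info("running in process context\n");
 *      }
 *      static DECLARE_WORK(my_work, my_work_fn);
 *      ...
 *      schedule_work(&my_work);
 */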

/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func)                              \
        do {                                                    \
                (_work)->func = (_func);                        \
        } while (0)

#define PREPARE_DELAYED_WORK(_work, _func)                      \
        PREPARE_WORK(&(_work)->work, (_func))

#ifdef CONFIG_DEBUG_OBJECTS_WORK
extern void __init_work(struct work_struct *work, int onstack);
extern void destroy_work_on_stack(struct work_struct *work);
static inline unsigned int work_static(struct work_struct *work)
{
        return *work_data_bits(work) & WORK_STRUCT_STATIC;
}
#else
static inline void __init_work(struct work_struct *work, int onstack) { }
static inline void destroy_work_on_stack(struct work_struct *work) { }
static inline unsigned int work_static(struct work_struct *work) { return 0; }
#endif

/*
 * initialize all of a work item in one go
 *
 * NOTE! No point in using "atomic_long_set()": using a direct
 * assignment of the work data initializer allows the compiler
 * to generate better code.
 */
#ifdef CONFIG_LOCKDEP
#define __INIT_WORK(_work, _func, _onstack)                     \
        do {                                                    \
                static struct lock_class_key __key;             \
                                                                \
                __init_work((_work), _onstack);                 \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
                lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                \
                PREPARE_WORK((_work), (_func));                 \
        } while (0)
#else
#define __INIT_WORK(_work, _func, _onstack)                     \
        do {                                                    \
                __init_work((_work), _onstack);                 \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
                INIT_LIST_HEAD(&(_work)->entry);                \
                PREPARE_WORK((_work), (_func));                 \
        } while (0)
#endif

#define INIT_WORK(_work, _func)                                 \
        do {                                                    \
                __INIT_WORK((_work), (_func), 0);               \
        } while (0)

#define INIT_WORK_ON_STACK(_work, _func)                        \
        do {                                                    \
                __INIT_WORK((_work), (_func), 1);               \
        } while (0)

#define INIT_DELAYED_WORK(_work, _func)                         \
        do {                                                    \
                INIT_WORK(&(_work)->work, (_func));             \
                init_timer(&(_work)->timer);                    \
        } while (0)

#define INIT_DELAYED_WORK_ON_STACK(_work, _func)                \
        do {                                                    \
                INIT_WORK_ON_STACK(&(_work)->work, (_func));    \
                init_timer_on_stack(&(_work)->timer);           \
        } while (0)

#define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)              \
        do {                                                    \
                INIT_WORK(&(_work)->work, (_func));             \
                init_timer_deferrable(&(_work)->timer);         \
        } while (0)
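
/*
 * Illustrative sketch (struct my_dev and its fields are hypothetical):
 * work items are usually embedded in a larger object, initialized at
 * runtime with the INIT_*WORK() helpers above, and the handler then
 * recovers the object with container_of():
 *
 *      struct my_dev {
 *              struct delayed_work poll_work;
 *      };
 *
 *      static void my_poll(struct work_struct *work)
 *      {
 *              struct my_dev *dev = container_of(to_delayed_work(work),
 *                                                struct my_dev, poll_work);
 *              ...
 *      }
 *
 *      INIT_DELAYED_WORK(&dev->poll_work, my_poll);
 */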

/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work)                                      \
        test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @w: The work item in question
 */
#define delayed_work_pending(w)                                 \
        work_pending(&(w)->work)

/**
 * work_clear_pending - for internal use only, mark a work item as not pending
 * @work: The work item in question
 */
#define work_clear_pending(work)                                \
        clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))

enum {
        WQ_FREEZEABLE           = 1 << 0, /* freeze during suspend */
        WQ_SINGLE_THREAD        = 1 << 1, /* no per-cpu worker */
};

extern struct workqueue_struct *
__create_workqueue_key(const char *name, unsigned int flags, int max_active,
                       struct lock_class_key *key, const char *lock_name);

#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, flags, max_active)             \
({                                                              \
        static struct lock_class_key __key;                     \
        const char *__lock_name;                                \
                                                                \
        if (__builtin_constant_p(name))                         \
                __lock_name = (name);                           \
        else                                                    \
                __lock_name = #name;                            \
                                                                \
        __create_workqueue_key((name), (flags), (max_active),   \
                               &__key, __lock_name);            \
})
#else
#define __create_workqueue(name, flags, max_active)             \
        __create_workqueue_key((name), (flags), (max_active), NULL, NULL)
#endif

#define create_workqueue(name)                                  \
        __create_workqueue((name), 0, 1)
#define create_freezeable_workqueue(name)                       \
        __create_workqueue((name), WQ_FREEZEABLE | WQ_SINGLE_THREAD, 1)
#define create_singlethread_workqueue(name)                     \
        __create_workqueue((name), WQ_SINGLE_THREAD, 1)
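
/*
 * Illustrative sketch ("mydrv" and my_work are hypothetical names): the
 * usual lifecycle of a dedicated single-threaded queue. flush_workqueue()
 * waits for everything queued so far before the queue is destroyed:
 *
 *      struct workqueue_struct *wq;
 *
 *      wq = create_singlethread_workqueue("mydrv");
 *      if (!wq)
 *              return -ENOMEM;
 *      queue_work(wq, &my_work);
 *      ...
 *      flush_workqueue(wq);
 *      destroy_workqueue(wq);
 */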
extern void destroy_workqueue(struct workqueue_struct *wq);

extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
extern int queue_work_on(int cpu, struct workqueue_struct *wq,
                        struct work_struct *work);
extern int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                        struct delayed_work *work, unsigned long delay);

extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern void flush_delayed_work(struct delayed_work *work);

extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay);
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
                                        unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
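
/*
 * Illustrative sketch (my_work and my_dwork are hypothetical): the
 * schedule_*() variants above queue onto the shared kernel-wide queue.
 * Delays are in jiffies, so wall-clock delays go through
 * msecs_to_jiffies():
 *
 *      schedule_work(&my_work);
 *      schedule_delayed_work(&my_dwork, msecs_to_jiffies(100));
 */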
extern int current_is_keventd(void);
extern int keventd_up(void);

extern void init_workqueues(void);
int execute_in_process_context(work_func_t fn, struct execute_work *);

extern int flush_work(struct work_struct *work);

extern int cancel_work_sync(struct work_struct *work);

/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work(), unless
 * it returns 1 and the work doesn't re-arm itself.  Run flush_workqueue() or
 * cancel_work_sync() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
        int ret;

        ret = del_timer_sync(&work->timer);
        if (ret)
                work_clear_pending(&work->work);
        return ret;
}

/*
 * Like above, but uses del_timer() instead of del_timer_sync().  This means,
 * if it returns 0 the timer function may be running and the queueing is in
 * progress.
 */
static inline int __cancel_delayed_work(struct delayed_work *work)
{
        int ret;

        ret = del_timer(&work->timer);
        if (ret)
                work_clear_pending(&work->work);
        return ret;
}

extern int cancel_delayed_work_sync(struct delayed_work *work);
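
/*
 * Illustrative sketch (dev->poll_work is hypothetical): typical teardown
 * for a self-rearming delayed work item.  cancel_delayed_work_sync()
 * both kills a pending timer and waits for a running callback, so it is
 * safe even if the handler requeues itself:
 *
 *      cancel_delayed_work_sync(&dev->poll_work);
 */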

/* Obsolete.  use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
                                        struct delayed_work *work)
{
        cancel_delayed_work_sync(work);
}

/* Obsolete.  use cancel_delayed_work_sync() */
static inline
void cancel_rearming_delayed_work(struct delayed_work *work)
{
        cancel_delayed_work_sync(work);
}

#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
        return fn(arg);
}
#else
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
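
/*
 * Illustrative sketch (read_on_cpu is a hypothetical function): run fn
 * on the given CPU and collect its return value; on !SMP builds this
 * reduces to a direct call of fn(arg):
 *
 *      static long read_on_cpu(void *arg)
 *      {
 *              ...
 *              return 0;
 *      }
 *
 *      long ret = work_on_cpu(2, read_on_cpu, NULL);
 */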

#endif /* _LINUX_WORKQUEUE_H */