workqueue.h

/*
 * workqueue.h --- work queue handling for Linux.
 */

#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H

#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>

struct workqueue_struct;

struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

struct work_struct {
	/* the first word is the work queue pointer and the flags rolled into
	 * one */
	unsigned long management;
#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
#define WORK_STRUCT_NOAUTOREL 1		/* F if work item automatically released on exec */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
	struct list_head entry;
	work_func_t func;
};

struct delayed_work {
	struct work_struct work;
	struct timer_list timer;
};

struct execute_work {
	struct work_struct work;
};
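/*
 * Illustrative sketch (not part of the original header): a work function is
 * handed only the struct work_struct pointer, so code that embeds a
 * work_struct in a larger structure usually recovers that structure with
 * container_of().  The struct and field names below are hypothetical.
 *
 *	struct my_device {
 *		int pending_events;
 *		struct work_struct event_work;
 *	};
 *
 *	static void my_event_handler(struct work_struct *work)
 *	{
 *		struct my_device *dev =
 *			container_of(work, struct my_device, event_work);
 *
 *		dev->pending_events = 0;
 *	}
 */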
#define __WORK_INITIALIZER(n, f) { \
	.management = 0, \
	.entry = { &(n).entry, &(n).entry }, \
	.func = (f), \
	}

#define __WORK_INITIALIZER_NAR(n, f) { \
	.management = (1 << WORK_STRUCT_NOAUTOREL), \
	.entry = { &(n).entry, &(n).entry }, \
	.func = (f), \
	}

#define __DELAYED_WORK_INITIALIZER(n, f) { \
	.work = __WORK_INITIALIZER((n).work, (f)), \
	.timer = TIMER_INITIALIZER(NULL, 0, 0), \
	}

#define __DELAYED_WORK_INITIALIZER_NAR(n, f) { \
	.work = __WORK_INITIALIZER_NAR((n).work, (f)), \
	.timer = TIMER_INITIALIZER(NULL, 0, 0), \
	}

#define DECLARE_WORK(n, f) \
	struct work_struct n = __WORK_INITIALIZER(n, f)

#define DECLARE_WORK_NAR(n, f) \
	struct work_struct n = __WORK_INITIALIZER_NAR(n, f)

#define DECLARE_DELAYED_WORK(n, f) \
	struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f)
#define DECLARE_DELAYED_WORK_NAR(n, f) \
	struct delayed_work n = __DELAYED_WORK_INITIALIZER_NAR(n, f)
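/*
 * Illustrative sketch (not part of the original header): DECLARE_WORK and
 * DECLARE_DELAYED_WORK define statically initialised work items bound to a
 * handler.  The identifiers below are hypothetical.
 *
 *	static void my_handler(struct work_struct *work);
 *	static DECLARE_WORK(my_work, my_handler);
 *	static DECLARE_DELAYED_WORK(my_dwork, my_handler);
 *
 * The items can then be queued with, e.g., schedule_work(&my_work) or
 * schedule_delayed_work(&my_dwork, HZ).
 */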
/*
 * initialize a work item's function pointer
 */
#define PREPARE_WORK(_work, _func) \
	do { \
		(_work)->func = (_func); \
	} while (0)

#define PREPARE_DELAYED_WORK(_work, _func) \
	PREPARE_WORK(&(_work)->work, (_func))

/*
 * initialize all of a work item in one go
 */
#define INIT_WORK(_work, _func) \
	do { \
		(_work)->management = 0; \
		INIT_LIST_HEAD(&(_work)->entry); \
		PREPARE_WORK((_work), (_func)); \
	} while (0)

#define INIT_WORK_NAR(_work, _func) \
	do { \
		(_work)->management = (1 << WORK_STRUCT_NOAUTOREL); \
		INIT_LIST_HEAD(&(_work)->entry); \
		PREPARE_WORK((_work), (_func)); \
	} while (0)

#define INIT_DELAYED_WORK(_work, _func) \
	do { \
		INIT_WORK(&(_work)->work, (_func)); \
		init_timer(&(_work)->timer); \
	} while (0)

#define INIT_DELAYED_WORK_NAR(_work, _func) \
	do { \
		INIT_WORK_NAR(&(_work)->work, (_func)); \
		init_timer(&(_work)->timer); \
	} while (0)
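/*
 * Illustrative sketch (not part of the original header): for dynamically
 * allocated objects, the INIT_WORK()/INIT_DELAYED_WORK() macros are used
 * instead of the static initialisers, typically from the object's setup
 * path.  The names below are hypothetical.
 *
 *	struct my_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *	if (!dev)
 *		return -ENOMEM;
 *	INIT_WORK(&dev->event_work, my_event_handler);
 *	INIT_DELAYED_WORK(&dev->poll_work, my_poll_handler);
 */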
/**
 * work_pending - Find out whether a work item is currently pending
 * @work: The work item in question
 */
#define work_pending(work) \
	test_bit(WORK_STRUCT_PENDING, &(work)->management)

/**
 * delayed_work_pending - Find out whether a delayable work item is currently
 * pending
 * @work: The work item in question
 */
#define delayed_work_pending(work) \
	test_bit(WORK_STRUCT_PENDING, &(work)->work.management)
/**
 * work_release - Release a work item under execution
 * @work: The work item to release
 *
 * This is used to release a work item that has been initialised with automatic
 * release mode disabled (WORK_STRUCT_NOAUTOREL is set). This gives the work
 * function the opportunity to grab auxiliary data from the container of the
 * work_struct before clearing the pending bit as the work_struct may be
 * subject to deallocation the moment the pending bit is cleared.
 *
 * In such a case, this should be called in the work function after it has
 * fetched any data it may require from the container of the work_struct.
 * After this function has been called, the work_struct may be scheduled for
 * further execution or it may be deallocated unless other precautions are
 * taken.
 *
 * This should also be used to release a delayed work item.
 */
#define work_release(work) \
	clear_bit(WORK_STRUCT_PENDING, &(work)->management)
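/*
 * Illustrative sketch (not part of the original header): a handler for a
 * work item set up with INIT_WORK_NAR() pulls whatever it needs out of the
 * containing object before calling work_release(), since the item may be
 * freed or requeued as soon as the pending bit is cleared.  The names
 * my_request, payload and process_payload() are hypothetical.
 *
 *	static void my_nar_handler(struct work_struct *work)
 *	{
 *		struct my_request *req =
 *			container_of(work, struct my_request, work);
 *		void *payload = req->payload;
 *
 *		work_release(work);
 *		process_payload(payload);
 *	}
 */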
extern struct workqueue_struct *__create_workqueue(const char *name,
						    int singlethread,
						    int freezeable);
#define create_workqueue(name) __create_workqueue((name), 0, 0)
#define create_freezeable_workqueue(name) __create_workqueue((name), 0, 1)
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)

extern void destroy_workqueue(struct workqueue_struct *wq);

extern int FASTCALL(queue_work(struct workqueue_struct *wq, struct work_struct *work));
extern int FASTCALL(queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work, unsigned long delay));
extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
	struct delayed_work *work, unsigned long delay);
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));

extern int FASTCALL(schedule_work(struct work_struct *work));
extern int FASTCALL(run_scheduled_work(struct work_struct *work));
extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
extern int schedule_on_each_cpu(work_func_t func);
extern void flush_scheduled_work(void);

extern int current_is_keventd(void);
extern int keventd_up(void);

extern void init_workqueues(void);
void cancel_rearming_delayed_work(struct delayed_work *work);
void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
				       struct delayed_work *);
int execute_in_process_context(work_func_t fn, struct execute_work *);
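/*
 * Illustrative sketch (not part of the original header): a driver that wants
 * its own worker thread rather than the shared keventd queue creates a
 * workqueue, queues items on it, and flushes it before tearing it down.  The
 * names below are hypothetical.
 *
 *	static struct workqueue_struct *my_wq;
 *
 *	my_wq = create_singlethread_workqueue("my_wq");
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	queue_work(my_wq, &dev->event_work);
 *	queue_delayed_work(my_wq, &dev->poll_work, HZ);
 *
 * and on teardown:
 *
 *	flush_workqueue(my_wq);
 *	destroy_workqueue(my_wq);
 */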
/*
 * Kill off a pending schedule_delayed_work().  Note that the work callback
 * function may still be running on return from cancel_delayed_work().  Run
 * flush_scheduled_work() to wait on it.
 */
static inline int cancel_delayed_work(struct delayed_work *work)
{
	int ret;

	ret = del_timer_sync(&work->timer);
	if (ret)
		clear_bit(WORK_STRUCT_PENDING, &work->work.management);
	return ret;
}
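/*
 * Illustrative sketch (not part of the original header): stopping a delayed
 * work item that was queued with schedule_delayed_work().  Cancelling only
 * stops the timer; per the comment above, flush_scheduled_work() is still
 * needed to wait for a handler that may already be running.  The field name
 * below is hypothetical.
 *
 *	cancel_delayed_work(&dev->poll_work);
 *	flush_scheduled_work();
 */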
#endif