/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Timeout for stopping processes
 */
extern unsigned int freeze_timeout_msecs;

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;

	return freezing_slow_path(p);
}

/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);

/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}

static inline bool try_to_freeze(void)
{
	return try_to_freeze_unsafe();
}

extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);
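
/*
 * Illustrative sketch (not part of this header): a freezable kernel thread
 * typically marks itself freezable and then polls the freezer on every
 * iteration of its main loop, roughly:
 *
 *	set_freezable();
 *	while (!kthread_should_stop()) {
 *		try_to_freeze();
 *		do_work();		// do_work() is a hypothetical helper
 *	}
 *
 * This assumes a thread created with kthread_run(); only set_freezable(),
 * try_to_freeze() and kthread_should_stop() are real APIs here.
 */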

#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed.  Fortunately, in the ____call_usermodehelper() case
 * the parent won't really block freeze_processes(), since
 * ____call_usermodehelper() (the child) does a little before exec/exit and
 * it can't be frozen before waking up the parent.
 */

/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, a freezer[_do_not]_count() pair
 * wraps a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}

/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if a freezing condition is already in
 * effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	try_to_freeze();
}
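
/*
 * Illustrative sketch (not part of this header): the pair above typically
 * brackets one long sleep so the freezer does not have to wait for it, e.g.
 * a vfork-style wait on a child:
 *
 *	freezer_do_not_count();
 *	wait_for_completion(&vfork_done);	// vfork_done is a placeholder
 *	freezer_count();	// rejoin the freezer; freeze now if requested
 *
 * The freezable_schedule*() macros below apply exactly this bracketing to
 * schedule() and schedule_timeout_killable().
 */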

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	smp_mb();
	try_to_freeze_unsafe();
}

/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *			 state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached.  IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip past the frozen-state test after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}
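
/*
 * Illustrative sketch (not part of this header): after the freezer has made
 * freezing() true system-wide, it counts the tasks that still have to enter
 * the refrigerator roughly like this (a simplified assumption modelled on
 * try_to_freeze_tasks(), not the actual implementation):
 *
 *	read_lock(&tasklist_lock);
 *	for_each_process_thread(g, p) {
 *		if (frozen(p) || freezer_should_skip(p))
 *			continue;	// frozen, or "frozen enough"
 *		todo++;			// still waiting on this task
 *	}
 *	read_unlock(&tasklist_lock);
 */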

/*
 * These macros are intended to be used whenever you want to allow a sleeping
 * task to be frozen.  Note that neither of them returns any clear indication
 * of whether a freeze event happened while inside them.
 */

/* Like schedule(), but should not block the freezer. */
#define freezable_schedule()						\
({									\
	freezer_do_not_count();						\
	schedule();							\
	freezer_count();						\
})
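
/*
 * Illustrative sketch (not part of this header): freezable_schedule() is a
 * drop-in replacement for schedule() in freezable wait loops, e.g.:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!done)			// "done" is a placeholder condition
 *		freezable_schedule();	// sleep without blocking the freezer
 *	__set_current_state(TASK_RUNNING);
 */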

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define freezable_schedule_unsafe()					\
({									\
	freezer_do_not_count();						\
	schedule();							\
	freezer_count_unsafe();						\
})

/* Like schedule_timeout_killable(), but should not block the freezer. */
#define freezable_schedule_timeout_killable(timeout)			\
({									\
	long __retval;							\
	freezer_do_not_count();						\
	__retval = schedule_timeout_killable(timeout);			\
	freezer_count();						\
	__retval;							\
})

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define freezable_schedule_timeout_killable_unsafe(timeout)		\
({									\
	long __retval;							\
	freezer_do_not_count();						\
	__retval = schedule_timeout_killable(timeout);			\
	freezer_count_unsafe();						\
	__retval;							\
})

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

#define wait_event_freezekillable(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count();						\
	__retval;							\
})

#define wait_event_freezable(wq, condition)				\
({									\
	int __retval;							\
	for (;;) {							\
		__retval = wait_event_interruptible(wq,			\
				(condition) || freezing(current));	\
		if (__retval || (condition))				\
			break;						\
		try_to_freeze();					\
	}								\
	__retval;							\
})

#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __retval = timeout;					\
	for (;;) {							\
		__retval = wait_event_interruptible_timeout(wq,		\
				(condition) || freezing(current),	\
				__retval);				\
		if (__retval <= 0 || (condition))			\
			break;						\
		try_to_freeze();					\
	}								\
	__retval;							\
})
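
/*
 * Illustrative sketch (not part of this header): a freezable kernel thread
 * waiting for work would use the wrapper above instead of
 * wait_event_interruptible(), e.g.:
 *
 *	set_freezable();
 *	while (!kthread_should_stop()) {
 *		wait_event_freezable(wq,
 *				have_work() || kthread_should_stop());
 *		process_work();	// have_work()/process_work() are placeholders
 *	}
 *
 * The wrapper wakes up when freezing starts, calls try_to_freeze(), and
 * resumes waiting once the task is thawed.
 */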

#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline bool freezer_should_skip(struct task_struct *p) { return false; }
static inline bool set_freezable(void) { return false; }

#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define wait_event_freezable(wq, condition)				\
	wait_event_interruptible(wq, condition)

#define wait_event_freezable_timeout(wq, condition, timeout)		\
	wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezekillable(wq, condition)			\
	wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif /* FREEZER_H_INCLUDED */