freezer.h

/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Timeout for stopping processes
 */
extern unsigned int freeze_timeout_msecs;

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;

	return freezing_slow_path(p);
}
/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);

static inline bool try_to_freeze(void)
{
	if (!(current->flags & PF_NOFREEZE))
		debug_check_no_locks_held();

	might_sleep();
	if (likely(!freezing(current)))
		return false;

	return __refrigerator(false);
}
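
/*
 * Illustrative sketch (not part of this header): a typical freezable
 * kernel thread marks itself freezable with set_freezable() and calls
 * try_to_freeze() from its main loop, so it parks in the refrigerator
 * whenever a freeze is requested.  The thread function and work helper
 * below are hypothetical names; kthread_should_stop() is assumed to
 * come from <linux/kthread.h>.
 *
 *	static int example_thread_fn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_should_stop()) {
 *			try_to_freeze();
 *			example_do_work(data);	// hypothetical work helper
 *		}
 *		return 0;
 *	}
 */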
extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);

#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */
/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed.  Fortunately, in the ____call_usermodehelper() case
 * the parent won't really block freeze_processes(), since
 * ____call_usermodehelper() (the child) does a little before exec/exit and
 * it can't be frozen before waking up the parent.
 */
/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, the freezer[_do_not]_count()
 * pair wraps a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}
/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if a freezing condition is already
 * in effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;

	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();

	try_to_freeze();
}
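
/*
 * Illustrative sketch (not part of this header): the intended pattern is
 * to bracket a single blocking call, the way a vfork parent does around
 * wait_for_completion(&vfork).  The completion name below is hypothetical
 * and <linux/completion.h> is assumed.
 *
 *	freezer_do_not_count();
 *	wait_for_completion(&example_done);	// hypothetical completion
 *	freezer_count();			// also calls try_to_freeze()
 */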
/**
 * freezer_should_skip - whether to skip a task when determining frozen
 *			 state is reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining whether the target
 * frozen state is reached.  IOW, if this function returns %true, @p is
 * considered frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip past frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();

	return p->flags & PF_FREEZER_SKIP;
}
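
/*
 * Illustrative sketch (not part of this header): after establishing a
 * %true freezing() condition, a freezer (cf. try_to_freeze_tasks() in
 * kernel/power/process.c) treats a task as "frozen enough" if it is
 * actually frozen or has asked to be skipped.  A counting loop might
 * look roughly like the following, where @todo is a hypothetical counter
 * of tasks still blocking the freeze:
 *
 *	if (!frozen(p) && !freezer_should_skip(p))
 *		todo++;
 */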
/*
 * These macros are intended to be used whenever you want to allow a sleeping
 * task to be frozen.  Note that neither returns any clear indication of
 * whether a freeze event happened while inside them.
 */

/* Like schedule(), but should not block the freezer. */
#define freezable_schedule() \
({ \
	freezer_do_not_count(); \
	schedule(); \
	freezer_count(); \
})

/* Like schedule_timeout_killable(), but should not block the freezer. */
#define freezable_schedule_timeout_killable(timeout) \
({ \
	long __retval; \
	freezer_do_not_count(); \
	__retval = schedule_timeout_killable(timeout); \
	freezer_count(); \
	__retval; \
})
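
/*
 * Illustrative sketch (not part of this header): a wait loop that should
 * not hold up the freezer can sleep through the wrapper instead of bare
 * schedule().  The condition helper below is a hypothetical name.
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (!example_condition_met())		// hypothetical condition
 *		freezable_schedule();
 *	__set_current_state(TASK_RUNNING);
 */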
/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

#define wait_event_freezekillable(wq, condition) \
({ \
	int __retval; \
	freezer_do_not_count(); \
	__retval = wait_event_killable(wq, (condition)); \
	freezer_count(); \
	__retval; \
})

#define wait_event_freezable(wq, condition) \
({ \
	int __retval; \
	for (;;) { \
		__retval = wait_event_interruptible(wq, \
				(condition) || freezing(current)); \
		if (__retval || (condition)) \
			break; \
		try_to_freeze(); \
	} \
	__retval; \
})

#define wait_event_freezable_timeout(wq, condition, timeout) \
({ \
	long __retval = timeout; \
	for (;;) { \
		__retval = wait_event_interruptible_timeout(wq, \
				(condition) || freezing(current), \
				__retval); \
		if (__retval <= 0 || (condition)) \
			break; \
		try_to_freeze(); \
	} \
	__retval; \
})
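
/*
 * Illustrative sketch (not part of this header): a driver thread waiting
 * for work can use the freezable variant so that suspend does not stall
 * on it.  The waitqueue and work predicate below are hypothetical;
 * kthread_should_stop() is assumed to come from <linux/kthread.h>.
 *
 *	int ret;
 *
 *	ret = wait_event_freezable(example_wq,
 *			example_have_work() || kthread_should_stop());
 *	if (ret)
 *		return ret;	// interrupted by a signal
 */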
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline bool freezer_should_skip(struct task_struct *p) { return false; }
static inline bool set_freezable(void) { return false; }

#define freezable_schedule()  schedule()

#define freezable_schedule_timeout_killable(timeout) \
	schedule_timeout_killable(timeout)

#define wait_event_freezable(wq, condition) \
	wait_event_interruptible(wq, condition)

#define wait_event_freezable_timeout(wq, condition, timeout) \
	wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezekillable(wq, condition) \
	wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif /* FREEZER_H_INCLUDED */