  1. /*
  2. * drivers/base/power/wakeup.c - System wakeup events framework
  3. *
  4. * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
  5. *
  6. * This file is released under the GPLv2.
  7. */
  8. #include <linux/device.h>
  9. #include <linux/slab.h>
  10. #include <linux/sched.h>
  11. #include <linux/capability.h>
  12. #include <linux/suspend.h>
  13. #include <linux/pm.h>
  14. /*
  15. * If set, the suspend/hibernate code will abort transitions to a sleep state
  16. * if wakeup events are registered during or immediately before the transition.
  17. */
  18. bool events_check_enabled;
  19. /* The counter of registered wakeup events. */
  20. static unsigned long event_count;
  21. /* A preserved old value of event_count. */
  22. static unsigned long saved_event_count;
  23. /* The counter of wakeup events being processed. */
  24. static unsigned long events_in_progress;
  25. static DEFINE_SPINLOCK(events_lock);
  26. static void pm_wakeup_timer_fn(unsigned long data);
  27. static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0);
  28. static unsigned long events_timer_expires;
  29. /*
  30. * The functions below use the observation that each wakeup event starts a
  31. * period in which the system should not be suspended. The moment this period
  32. * will end depends on how the wakeup event is going to be processed after being
  33. * detected and all of the possible cases can be divided into two distinct
  34. * groups.
  35. *
  36. * First, a wakeup event may be detected by the same functional unit that will
  37. * carry out the entire processing of it and possibly will pass it to user space
  38. * for further processing. In that case the functional unit that has detected
  39. * the event may later "close" the "no suspend" period associated with it
  40. * directly as soon as it has been dealt with. The pair of pm_stay_awake() and
  41. * pm_relax(), balanced with each other, is supposed to be used in such
  42. * situations.
  43. *
  44. * Second, a wakeup event may be detected by one functional unit and processed
  45. * by another one. In that case the unit that has detected it cannot really
  46. * "close" the "no suspend" period associated with it, unless it knows in
  47. * advance what's going to happen to the event during processing. This
  48. * knowledge, however, may not be available to it, so it can simply specify time
  49. * to wait before the system can be suspended and pass it as the second
  50. * argument of pm_wakeup_event().
  51. */
  52. /**
  53. * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
  54. * @dev: Device the wakeup event is related to.
  55. *
  56. * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the
  57. * counter of wakeup events being processed. If @dev is not NULL, the counter
  58. * of wakeup events related to @dev is incremented too.
  59. *
  60. * Call this function after detecting of a wakeup event if pm_relax() is going
  61. * to be called directly after processing the event (and possibly passing it to
  62. * user space for further processing).
  63. *
  64. * It is safe to call this function from interrupt context.
  65. */
  66. void pm_stay_awake(struct device *dev)
  67. {
  68. unsigned long flags;
  69. spin_lock_irqsave(&events_lock, flags);
  70. if (dev)
  71. dev->power.wakeup_count++;
  72. events_in_progress++;
  73. spin_unlock_irqrestore(&events_lock, flags);
  74. }
  75. /**
  76. * pm_relax - Notify the PM core that processing of a wakeup event has ended.
  77. *
  78. * Notify the PM core that a wakeup event has been processed by decrementing
  79. * the counter of wakeup events being processed and incrementing the counter
  80. * of registered wakeup events.
  81. *
  82. * Call this function for wakeup events whose processing started with calling
  83. * pm_stay_awake().
  84. *
  85. * It is safe to call it from interrupt context.
  86. */
  87. void pm_relax(void)
  88. {
  89. unsigned long flags;
  90. spin_lock_irqsave(&events_lock, flags);
  91. if (events_in_progress) {
  92. events_in_progress--;
  93. event_count++;
  94. }
  95. spin_unlock_irqrestore(&events_lock, flags);
  96. }
  97. /**
  98. * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
  99. *
  100. * Decrease the counter of wakeup events being processed after it was increased
  101. * by pm_wakeup_event().
  102. */
  103. static void pm_wakeup_timer_fn(unsigned long data)
  104. {
  105. unsigned long flags;
  106. spin_lock_irqsave(&events_lock, flags);
  107. if (events_timer_expires
  108. && time_before_eq(events_timer_expires, jiffies)) {
  109. events_in_progress--;
  110. events_timer_expires = 0;
  111. }
  112. spin_unlock_irqrestore(&events_lock, flags);
  113. }
  114. /**
  115. * pm_wakeup_event - Notify the PM core of a wakeup event.
  116. * @dev: Device the wakeup event is related to.
  117. * @msec: Anticipated event processing time (in milliseconds).
  118. *
  119. * Notify the PM core of a wakeup event (signaled by @dev) that will take
  120. * approximately @msec milliseconds to be processed by the kernel. Increment
  121. * the counter of registered wakeup events and (if @msec is nonzero) set up
  122. * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the
  123. * timer has not been set up already, increment the counter of wakeup events
  124. * being processed). If @dev is not NULL, the counter of wakeup events related
  125. * to @dev is incremented too.
  126. *
  127. * It is safe to call this function from interrupt context.
  128. */
  129. void pm_wakeup_event(struct device *dev, unsigned int msec)
  130. {
  131. unsigned long flags;
  132. spin_lock_irqsave(&events_lock, flags);
  133. event_count++;
  134. if (dev)
  135. dev->power.wakeup_count++;
  136. if (msec) {
  137. unsigned long expires;
  138. expires = jiffies + msecs_to_jiffies(msec);
  139. if (!expires)
  140. expires = 1;
  141. if (!events_timer_expires
  142. || time_after(expires, events_timer_expires)) {
  143. if (!events_timer_expires)
  144. events_in_progress++;
  145. mod_timer(&events_timer, expires);
  146. events_timer_expires = expires;
  147. }
  148. }
  149. spin_unlock_irqrestore(&events_lock, flags);
  150. }
  151. /**
  152. * pm_check_wakeup_events - Check for new wakeup events.
  153. *
  154. * Compare the current number of registered wakeup events with its preserved
  155. * value from the past to check if new wakeup events have been registered since
  156. * the old value was stored. Check if the current number of wakeup events being
  157. * processed is zero.
  158. */
  159. bool pm_check_wakeup_events(void)
  160. {
  161. unsigned long flags;
  162. bool ret = true;
  163. spin_lock_irqsave(&events_lock, flags);
  164. if (events_check_enabled) {
  165. ret = (event_count == saved_event_count) && !events_in_progress;
  166. events_check_enabled = ret;
  167. }
  168. spin_unlock_irqrestore(&events_lock, flags);
  169. return ret;
  170. }
  171. /**
  172. * pm_get_wakeup_count - Read the number of registered wakeup events.
  173. * @count: Address to store the value at.
  174. *
  175. * Store the number of registered wakeup events at the address in @count. Block
  176. * if the current number of wakeup events being processed is nonzero.
  177. *
  178. * Return false if the wait for the number of wakeup events being processed to
  179. * drop down to zero has been interrupted by a signal (and the current number
  180. * of wakeup events being processed is still nonzero). Otherwise return true.
  181. */
  182. bool pm_get_wakeup_count(unsigned long *count)
  183. {
  184. bool ret;
  185. spin_lock_irq(&events_lock);
  186. if (capable(CAP_SYS_ADMIN))
  187. events_check_enabled = false;
  188. while (events_in_progress && !signal_pending(current)) {
  189. spin_unlock_irq(&events_lock);
  190. schedule_timeout_interruptible(msecs_to_jiffies(100));
  191. spin_lock_irq(&events_lock);
  192. }
  193. *count = event_count;
  194. ret = !events_in_progress;
  195. spin_unlock_irq(&events_lock);
  196. return ret;
  197. }
  198. /**
  199. * pm_save_wakeup_count - Save the current number of registered wakeup events.
  200. * @count: Value to compare with the current number of registered wakeup events.
  201. *
  202. * If @count is equal to the current number of registered wakeup events and the
  203. * current number of wakeup events being processed is zero, store @count as the
  204. * old number of registered wakeup events to be used by pm_check_wakeup_events()
  205. * and return true. Otherwise return false.
  206. */
  207. bool pm_save_wakeup_count(unsigned long count)
  208. {
  209. bool ret = false;
  210. spin_lock_irq(&events_lock);
  211. if (count == event_count && !events_in_progress) {
  212. saved_event_count = count;
  213. events_check_enabled = true;
  214. ret = true;
  215. }
  216. spin_unlock_irq(&events_lock);
  217. return ret;
  218. }