@@ -24,12 +24,26 @@
  */
 
 bool events_check_enabled;
 
-/* The counter of registered wakeup events. */
-static atomic_t event_count = ATOMIC_INIT(0);
-/* A preserved old value of event_count. */
+/*
+ * Combined counters of registered wakeup events and wakeup events in progress.
+ * They need to be modified together atomically, so it's better to use one
+ * atomic variable to hold them both.
+ */
+static atomic_t combined_event_count = ATOMIC_INIT(0);
+
+#define IN_PROGRESS_BITS	(sizeof(int) * 4)
+#define MAX_IN_PROGRESS	((1 << IN_PROGRESS_BITS) - 1)
+
+static void split_counters(unsigned int *cnt, unsigned int *inpr)
+{
+	unsigned int comb = atomic_read(&combined_event_count);
+
+	*cnt = (comb >> IN_PROGRESS_BITS);
+	*inpr = comb & MAX_IN_PROGRESS;
+}
+
+/* A preserved old value of the events counter. */
 static unsigned int saved_count;
-/* The counter of wakeup events being processed. */
-static atomic_t events_in_progress = ATOMIC_INIT(0);
 
 static DEFINE_SPINLOCK(events_lock);
 
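[Annotation] The new layout packs both counters into one atomic_t: the upper half of the int holds the count of registered wakeup events, the lower half the number of events still in progress, so both can be updated by a single atomic operation. A minimal standalone sketch of the bit arithmetic (plain, non-atomic C; split() mirrors split_counters() minus the atomic read, and the main() harness is ours, not part of the patch):

#include <assert.h>
#include <stdio.h>

#define IN_PROGRESS_BITS (sizeof(int) * 4)	/* half the bits of an int */
#define MAX_IN_PROGRESS  ((1U << IN_PROGRESS_BITS) - 1)

/* Decode the packed value, as split_counters() does after atomic_read(). */
static void split(unsigned int comb, unsigned int *cnt, unsigned int *inpr)
{
	*cnt = comb >> IN_PROGRESS_BITS;	/* upper half: registered events */
	*inpr = comb & MAX_IN_PROGRESS;		/* lower half: events in progress */
}

int main(void)
{
	unsigned int comb = 0, cnt, inpr;

	comb += 1;			/* activate: one more event in progress */
	comb += MAX_IN_PROGRESS;	/* deactivate: cnt++ and inpr-- at once */
	split(comb, &cnt, &inpr);
	printf("cnt=%u inpr=%u\n", cnt, inpr);	/* prints cnt=1 inpr=0 */
	assert(cnt == 1 && inpr == 0);
	return 0;
}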
@@ -307,7 +321,8 @@ static void wakeup_source_activate(struct wakeup_source *ws)
 	ws->timer_expires = jiffies;
 	ws->last_time = ktime_get();
 
-	atomic_inc(&events_in_progress);
+	/* Increment the counter of events in progress. */
+	atomic_inc(&combined_event_count);
 }
 
 /**
@@ -394,14 +409,10 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
 	del_timer(&ws->timer);
 
 	/*
-	 * event_count has to be incremented before events_in_progress is
-	 * modified, so that the callers of pm_check_wakeup_events() and
-	 * pm_save_wakeup_count() don't see the old value of event_count and
-	 * events_in_progress equal to zero at the same time.
+	 * Increment the counter of registered wakeup events and decrement the
+	 * counter of wakeup events in progress simultaneously.
 	 */
-	atomic_inc(&event_count);
-	smp_mb__before_atomic_dec();
-	atomic_dec(&events_in_progress);
+	atomic_add(MAX_IN_PROGRESS, &combined_event_count);
 }
 
 /**
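[Annotation] Why a single atomic_add() can replace the inc/barrier/dec sequence: MAX_IN_PROGRESS is 2^IN_PROGRESS_BITS - 1, so adding it is the same as adding 2^IN_PROGRESS_BITS (one to the upper, registered-events half) while subtracting 1 (from the lower, in-progress half). Deactivation always follows a matching activation, so the lower half is nonzero and the subtraction cannot borrow into the upper field. A worked example with 16-bit halves (the starting values are illustrative):

#include <assert.h>

int main(void)
{
	/* 3 registered events, 2 in progress: comb = 3 << 16 | 2 */
	unsigned int comb = (3u << 16) | 2u;

	comb += 0xffffu;	/* MAX_IN_PROGRESS for a 32-bit int */

	assert(comb >> 16 == 4);	/* registered-event count went up... */
	assert((comb & 0xffffu) == 1);	/* ...and in-progress count went down */
	return 0;
}

Because both halves live in one word, a reader calling split_counters() can never observe the increment without the matching decrement, which is what the old smp_mb__before_atomic_dec() dance was trying to guarantee.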
@@ -556,8 +567,10 @@ bool pm_wakeup_pending(void)
 
 	spin_lock_irqsave(&events_lock, flags);
 	if (events_check_enabled) {
-		ret = ((unsigned int)atomic_read(&event_count) != saved_count)
-			|| atomic_read(&events_in_progress);
+		unsigned int cnt, inpr;
+
+		split_counters(&cnt, &inpr);
+		ret = (cnt != saved_count || inpr > 0);
 		events_check_enabled = !ret;
 	}
 	spin_unlock_irqrestore(&events_lock, flags);
@@ -579,19 +592,22 @@ bool pm_wakeup_pending(void)
  */
 bool pm_get_wakeup_count(unsigned int *count)
 {
-	bool ret;
+	unsigned int cnt, inpr;
 
 	if (capable(CAP_SYS_ADMIN))
 		events_check_enabled = false;
 
-	while (atomic_read(&events_in_progress) && !signal_pending(current)) {
+	for (;;) {
+		split_counters(&cnt, &inpr);
+		if (inpr == 0 || signal_pending(current))
+			break;
 		pm_wakeup_update_hit_counts();
 		schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
 	}
 
-	ret = !atomic_read(&events_in_progress);
-	*count = atomic_read(&event_count);
-	return ret;
+	split_counters(&cnt, &inpr);
+	*count = cnt;
+	return !inpr;
 }
 
 /**
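[Annotation] In mainline these two functions back the /sys/power/wakeup_count interface: a suspend manager reads the current count (pm_get_wakeup_count()), writes the same value back (pm_save_wakeup_count()), and only then initiates suspend, so a wakeup event arriving in between makes the write or the suspend fail. A hedged userspace sketch of that protocol (error handling trimmed; requires root):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/sys/power/wakeup_count", O_RDWR);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* pm_get_wakeup_count() */
	if (n <= 0)
		return 1;

	/* Writing the count back reaches pm_save_wakeup_count(); it fails if
	 * events were registered since the read, and the caller should retry. */
	if (write(fd, buf, n) < 0)
		return 1;
	close(fd);

	/* Only now is it safe to start the suspend. */
	fd = open("/sys/power/state", O_WRONLY);
	if (fd < 0 || write(fd, "mem", 3) < 0)
		return 1;	/* suspend aborted, e.g. by a pending wakeup event */
	close(fd);
	return 0;
}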
@@ -605,11 +621,12 @@ bool pm_get_wakeup_count(unsigned int *count)
  */
 bool pm_save_wakeup_count(unsigned int count)
 {
+	unsigned int cnt, inpr;
 	bool ret = false;
 
 	spin_lock_irq(&events_lock);
-	if (count == (unsigned int)atomic_read(&event_count)
-	    && !atomic_read(&events_in_progress)) {
+	split_counters(&cnt, &inpr);
+	if (cnt == count && inpr == 0) {
 		saved_count = count;
 		events_check_enabled = true;
 		ret = true;