@@ -74,6 +74,8 @@ static struct clocksource *watchdog;
 static struct timer_list watchdog_timer;
 static DEFINE_SPINLOCK(watchdog_lock);
 static cycle_t watchdog_last;
+static int watchdog_resumed;
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
@@ -98,15 +100,26 @@ static void clocksource_watchdog(unsigned long data)
 	struct clocksource *cs, *tmp;
 	cycle_t csnow, wdnow;
 	int64_t wd_nsec, cs_nsec;
+	int resumed;
 
 	spin_lock(&watchdog_lock);
 
+	resumed = watchdog_resumed;
+	if (unlikely(resumed))
+		watchdog_resumed = 0;
+
 	wdnow = watchdog->read();
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
 		csnow = cs->read();
+
+		if (unlikely(resumed)) {
+			cs->wd_last = csnow;
+			continue;
+		}
+
 		/* Initialized ? */
 		if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
 			if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
@@ -136,6 +149,13 @@ static void clocksource_watchdog(unsigned long data)
 	}
 	spin_unlock(&watchdog_lock);
 }
+static void clocksource_resume_watchdog(void)
+{
+	spin_lock(&watchdog_lock);
+	watchdog_resumed = 1;
+	spin_unlock(&watchdog_lock);
+}
+
 static void clocksource_check_watchdog(struct clocksource *cs)
 {
 	struct clocksource *cse;
@@ -182,8 +202,33 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 	if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
 		cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
+
+static inline void clocksource_resume_watchdog(void) { }
 #endif
 
+/**
+ * clocksource_resume - resume the clocksource(s)
+ */
+void clocksource_resume(void)
+{
+	struct list_head *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&clocksource_lock, flags);
+
+	list_for_each(tmp, &clocksource_list) {
+		struct clocksource *cs;
+
+		cs = list_entry(tmp, struct clocksource, list);
+		if (cs->resume)
+			cs->resume();
+	}
+
+	clocksource_resume_watchdog();
+
+	spin_unlock_irqrestore(&clocksource_lock, flags);
+}
+
 /**
  * clocksource_get_next - Returns the selected clocksource
  *
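
For context, the new clocksource_resume() walks clocksource_list and invokes an optional per-clocksource ->resume() callback (taking no arguments, as the cs->resume() call above implies), then marks the watchdog as resumed so the next watchdog run only re-captures wd_last instead of flagging a spurious delta. Below is a minimal sketch, not part of the patch, of how a clocksource driver might supply such a callback; the "my_counter" name, read_my_counter() helper, rating and mask values are hypothetical, while the standard clocksource fields and the .resume hook follow the existing API and this patch.

#include <linux/clocksource.h>

/* Hypothetical hardware accessor; a real driver reads its counter here. */
static cycle_t read_my_counter(void)
{
	return (cycle_t)0;
}

/* Invoked via clocksource_resume() after a suspend/resume cycle. */
static void my_counter_resume(void)
{
	/* Reprogram or restart the counter hardware if needed. */
}

static struct clocksource my_counter_clocksource = {
	.name	= "my_counter",
	.rating	= 200,
	.read	= read_my_counter,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume	= my_counter_resume,	/* optional hook used by clocksource_resume() */
};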