@@ -165,13 +165,6 @@ struct timespec raw_time;
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
-static struct timespec xtime_cache __attribute__ ((aligned (16)));
-void update_xtime_cache(u64 nsec)
-{
-	xtime_cache = xtime;
-	timespec_add_ns(&xtime_cache, nsec);
-}
-
 /* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
@@ -332,8 +325,6 @@ int do_settimeofday(struct timespec *tv)
 
 	xtime = *tv;
 
-	update_xtime_cache(0);
-
 	timekeeper.ntp_error = 0;
 	ntp_clear();
 
@@ -559,7 +550,6 @@ void __init timekeeping_init(void)
 	}
 	set_normalized_timespec(&wall_to_monotonic,
 				-boot.tv_sec, -boot.tv_nsec);
-	update_xtime_cache(0);
 	total_sleep_time.tv_sec = 0;
 	total_sleep_time.tv_nsec = 0;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -593,7 +583,6 @@ static int timekeeping_resume(struct sys_device *dev)
 		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
 		total_sleep_time = timespec_add_safe(total_sleep_time, ts);
 	}
-	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
 	timekeeper.ntp_error = 0;
@@ -788,7 +777,6 @@ void update_wall_time(void)
 {
 	struct clocksource *clock;
 	cycle_t offset;
-	u64 nsecs;
 	int shift = 0, maxshift;
 
 	/* Make sure we're fully resumed: */
@@ -846,7 +834,9 @@ void update_wall_time(void)
 		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
 	}
 
-	/* store full nanoseconds into xtime after rounding it up and
+
+	/*
+	 * Store full nanoseconds into xtime after rounding it up and
 	 * add the remainder to the error difference.
 	 */
 	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
@@ -854,8 +844,15 @@ void update_wall_time(void)
 	timekeeper.ntp_error += timekeeper.xtime_nsec <<
 				timekeeper.ntp_error_shift;
 
-	nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
-	update_xtime_cache(nsecs);
+	/*
+	 * Finally, make sure that after the rounding
+	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
+	 */
+	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
+		xtime.tv_nsec -= NSEC_PER_SEC;
+		xtime.tv_sec++;
+		second_overflow();
+	}
 
 	/* check to see if there is a new clocksource to use */
 	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
@@ -895,13 +892,13 @@ EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-	return xtime_cache.tv_sec;
+	return xtime.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-	return xtime_cache;
+	return xtime;
 }
 
 struct timespec current_kernel_time(void)
@@ -912,7 +909,7 @@ struct timespec current_kernel_time(void)
 	do {
 		seq = read_seqbegin(&xtime_lock);
 
-		now = xtime_cache;
+		now = xtime;
 	} while (read_seqretry(&xtime_lock, seq));
 
 	return now;
@@ -927,7 +924,7 @@ struct timespec get_monotonic_coarse(void)
 	do {
 		seq = read_seqbegin(&xtime_lock);
 
-		now = xtime_cache;
+		now = xtime;
 		mono = wall_to_monotonic;
 	} while (read_seqretry(&xtime_lock, seq));
 
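
As a standalone illustration of the normalization step the patch adds to update_wall_time(): once the accumulated nanoseconds have been rounded up, any overflow in tv_nsec is carried into tv_sec. The sketch below is a minimal userspace C program, not kernel code; handle_leap_window() is a hypothetical stand-in that only marks where the kernel would call second_overflow() for its per-second NTP processing.

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* hypothetical stand-in for the kernel's second_overflow() */
static void handle_leap_window(void)
{
}

/* carry any rounding overflow from tv_nsec into tv_sec */
static void normalize_after_rounding(struct timespec *ts)
{
	if (ts->tv_nsec >= NSEC_PER_SEC) {
		ts->tv_nsec -= NSEC_PER_SEC;
		ts->tv_sec++;
		handle_leap_window();
	}
}

int main(void)
{
	/* pretend the rounding pushed tv_nsec just past one full second */
	struct timespec ts = { .tv_sec = 100, .tv_nsec = NSEC_PER_SEC + 3 };

	normalize_after_rounding(&ts);
	printf("%ld.%09ld\n", (long)ts.tv_sec, (long)ts.tv_nsec);	/* 101.000000003 */
	return 0;
}

A single if (rather than a loop) mirrors the added hunk, which only expects the final rounding to push tv_nsec slightly past one second per call; that is also why the kernel marks the branch unlikely().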