|
@@ -1062,6 +1062,33 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
|
|
|
return offset;
|
|
|
}
|
|
|
|
|
|
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
|
|
|
+static inline void old_vsyscall_fixup(struct timekeeper *tk)
|
|
|
+{
|
|
|
+ s64 remainder;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Store only full nanoseconds into xtime_nsec after rounding
|
|
|
+ * it up and add the remainder to the error difference.
|
|
|
+ * XXX - This is necessary to avoid small 1ns inconsistencies caused
|
|
|
+ * by truncating the remainder in vsyscalls. However, it causes
|
|
|
+ * additional work to be done in timekeeping_adjust(). Once
|
|
|
+ * the vsyscall implementations are converted to use xtime_nsec
|
|
|
+ * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
|
|
|
+ * users are removed, this can be killed.
|
|
|
+ */
|
|
|
+ remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
|
|
|
+ tk->xtime_nsec -= remainder;
|
|
|
+ tk->xtime_nsec += 1ULL << tk->shift;
|
|
|
+ tk->ntp_error += remainder << tk->ntp_error_shift;
|
|
|
+
|
|
|
+}
|
|
|
+#else
|
|
|
+#define old_vsyscall_fixup(tk)
|
|
|
+#endif
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
/**
|
|
|
* update_wall_time - Uses the current clocksource to increment the wall time
|
|
|
*
|
|
@@ -1073,7 +1100,6 @@ static void update_wall_time(void)
|
|
|
cycle_t offset;
|
|
|
int shift = 0, maxshift;
|
|
|
unsigned long flags;
|
|
|
- s64 remainder;
|
|
|
|
|
|
write_seqlock_irqsave(&tk->lock, flags);
|
|
|
|
|
@@ -1115,20 +1141,11 @@ static void update_wall_time(void)
|
|
|
/* correct the clock when NTP error is too big */
|
|
|
timekeeping_adjust(tk, offset);
|
|
|
|
|
|
-
|
|
|
/*
|
|
|
- * Store only full nanoseconds into xtime_nsec after rounding
|
|
|
- * it up and add the remainder to the error difference.
|
|
|
- * XXX - This is necessary to avoid small 1ns inconsistnecies caused
|
|
|
- * by truncating the remainder in vsyscalls. However, it causes
|
|
|
- * additional work to be done in timekeeping_adjust(). Once
|
|
|
- * the vsyscall implementations are converted to use xtime_nsec
|
|
|
- * (shifted nanoseconds), this can be killed.
|
|
|
- */
|
|
|
- remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
|
|
|
- tk->xtime_nsec -= remainder;
|
|
|
- tk->xtime_nsec += 1ULL << tk->shift;
|
|
|
- tk->ntp_error += remainder << tk->ntp_error_shift;
|
|
|
+ * XXX This can be killed once everyone converts
|
|
|
+ * to the new update_vsyscall.
|
|
|
+ */
|
|
|
+ old_vsyscall_fixup(tk);
|
|
|
|
|
|
/*
|
|
|
* Finally, make sure that after the rounding
|