@@ -401,7 +401,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
	 * this is optimized for the most common adjustments of -1,0,1,
	 * for other values we can do a bit more work.
	 */
-static void clocksource_adjust(struct clocksource *clock, s64 offset)
+static void clocksource_adjust(s64 offset)
 {
	s64 error, interval = clock->cycle_interval;
	int adj;
@@ -476,7 +476,7 @@ void update_wall_time(void)
	}

	/* correct the clock when NTP error is too big */
-	clocksource_adjust(clock, offset);
+	clocksource_adjust(offset);

	/* store full nanoseconds into xtime */
	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;