|
@@ -45,7 +45,8 @@ struct clocksource;
|
|
|
* @read: returns a cycle value
|
|
|
* @mask: bitmask for two's complement
|
|
|
* subtraction of non 64 bit counters
|
|
|
- * @mult: cycle to nanosecond multiplier
|
|
|
+ * @mult: cycle to nanosecond multiplier (adjusted by NTP)
|
|
|
+ * @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP)
|
|
|
* @shift: cycle to nanosecond divisor (power of two)
|
|
|
* @flags: flags describing special properties
|
|
|
* @vread: vsyscall based read
|
|
@@ -63,6 +64,7 @@ struct clocksource {
|
|
|
cycle_t (*read)(void);
|
|
|
cycle_t mask;
|
|
|
u32 mult;
|
|
|
+ u32 mult_orig;
|
|
|
u32 shift;
|
|
|
unsigned long flags;
|
|
|
cycle_t (*vread)(void);
|
|
@@ -201,16 +203,17 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
|
|
|
{
|
|
|
u64 tmp;
|
|
|
|
|
|
- /* XXX - All of this could use a whole lot of optimization */
|
|
|
+ /* Do the ns -> cycle conversion first, using original mult */
|
|
|
tmp = length_nsec;
|
|
|
tmp <<= c->shift;
|
|
|
- tmp += c->mult/2;
|
|
|
- do_div(tmp, c->mult);
|
|
|
+ tmp += c->mult_orig/2;
|
|
|
+ do_div(tmp, c->mult_orig);
|
|
|
|
|
|
c->cycle_interval = (cycle_t)tmp;
|
|
|
if (c->cycle_interval == 0)
|
|
|
c->cycle_interval = 1;
|
|
|
|
|
|
+	/* Go back from cycles -> shifted ns, this time using the NTP-adjusted mult */
|
|
|
c->xtime_interval = (u64)c->cycle_interval * c->mult;
|
|
|
}
|
|
|
|