@@ -118,11 +118,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
 	return pv_tsc_khz;
 }
 
+static atomic64_t last_value = ATOMIC64_INIT(0);
+
 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 {
 	struct pvclock_shadow_time shadow;
 	unsigned version;
 	cycle_t ret, offset;
+	u64 last;
 
 	do {
 		version = pvclock_get_time_values(&shadow, src);
@@ -132,6 +135,27 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 		barrier();
 	} while (version != src->version);
 
+	/*
+	 * The assumption here is that last_value, a global accumulator, always
+	 * goes forward.  If we are less than that, we should not be much smaller.
+	 * We assume there is an error margin we're inside, and then the correction
+	 * does not sacrifice accuracy.
+	 *
+	 * For reads: the global may have changed between the test and the return,
+	 * but that only means someone else poked the clock at a later time.
+	 * We just need to make sure we are not seeing a backwards event.
+	 *
+	 * For updates: last_value = ret is not enough, since two vcpus could be
+	 * updating at the same time, and one of them could be slightly behind,
+	 * making the assumption that last_value always goes forward fail to hold.
+	 */
+	last = atomic64_read(&last_value);
+	do {
+		if (ret < last)
+			return last;
+		last = atomic64_cmpxchg(&last_value, last, ret);
+	} while (unlikely(last != ret));
+
 	return ret;
 }
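
For anyone who wants to experiment with this synchronization pattern outside the kernel, the sketch below reproduces the same monotonic-clamp idea in userspace C11. It is only an illustration under stated assumptions: raw_clock() is a hypothetical stand-in for the per-vcpu reading computed by pvclock_clocksource_read() above, and C11 atomics replace the kernel's atomic64_read()/atomic64_cmpxchg() helpers.

#include <stdatomic.h>
#include <stdint.h>

/* Global accumulator, playing the role of last_value in the patch. */
static _Atomic uint64_t last_value;

/* Hypothetical stand-in for the raw per-vcpu clock reading. */
extern uint64_t raw_clock(void);

uint64_t monotonic_read(void)
{
	uint64_t ret = raw_clock();
	uint64_t last = atomic_load(&last_value);

	for (;;) {
		/* Someone already published a later time: never go backwards. */
		if (ret < last)
			return last;
		/*
		 * Try to publish our reading.  On failure the current value
		 * is written back into `last`, so a concurrent updater just
		 * forces a re-check instead of silently losing the race.
		 */
		if (atomic_compare_exchange_weak(&last_value, &last, ret))
			return ret;
	}
}

The design point is the same as in the patch: a plain `last_value = ret` store would let a slightly-behind vcpu overwrite a newer timestamp, so the update has to be a compare-and-swap that is retried against whatever value actually won.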