
[PATCH] X86_64 monotonic_clock goes backwards

I've noticed some erratic behavior while testing the X86_64 version
of monotonic_clock().

While spinning in a loop reading monotonic clock values (pinned to a
single CPU), I noticed that the difference between successive values
occasionally went negative (time going backwards).
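
For reference, a userspace analog of that test loop (a sketch, not the
original: the original read the kernel's monotonic_clock() directly,
and the CPU pinning via sched_setaffinity() is omitted here):

	#define _POSIX_C_SOURCE 199309L	/* for clock_gettime() */
	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;
		long long prev = 0, cur;

		for (;;) {
			/* read the monotonic clock as one ns count */
			clock_gettime(CLOCK_MONOTONIC, &ts);
			cur = (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
			if (cur < prev)
				printf("backwards: %lld -> %lld\n", prev, cur);
			prev = cur;
		}
		return 0;
	}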

I found that in the following code:
                this_offset = get_cycles_sync();
                /* FIXME: 1000 or 1000000? */
-->             offset = (this_offset - last_offset)*1000 / cpu_khz;
        }
        return base + offset;

the offset sometimes turns out to be 0, even though
this_offset > last_offset.
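
The two code paths also disagree on units: main_timer_handler()
advances monotonic_base with *1000000 / cpu_khz (nanoseconds), while
monotonic_clock() computes the offset with *1000 / cpu_khz
(microseconds), which is exactly the ambiguity the FIXME flags. With
assumed numbers (cpu_khz = 2000000, i.e. a 2 GHz TSC) the truncation
is easy to see:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long cpu_khz = 2000000ULL; /* assumed 2 GHz TSC */
		unsigned long long delta = 1500ULL;	 /* cycles since last tick */

		/* old monotonic_clock() scaling: truncates to 0 */
		printf("%llu\n", delta * 1000 / cpu_khz);	/* prints 0 */
		/* main_timer_handler() scaling: nanoseconds */
		printf("%llu\n", delta * 1000000 / cpu_khz);	/* prints 750 */
		return 0;
	}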

Added fix from Toyo Abe <toyoa@mvista.com>:

The x86_64-mm-monotonic-clock.patch in 2.6.18-rc4-mm2 made a change to
the updating of monotonic_base. It now uses cycles_2_ns().
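
For context, cycles_2_ns() on x86_64 at the time was a fixed-point
conversion driven by a scale derived from cpu_khz. A minimal sketch
(the 10-bit shift and variable names are from memory, so treat them
as assumptions rather than a quote from this tree):

	#define NS_SCALE 10	/* assumed fixed-point shift */

	static unsigned int cyc2ns_scale;

	static inline void set_cyc2ns_scale(unsigned long khz)
	{
		/* ns per cycle = 10^6 ns-per-ms / cycles-per-ms */
		cyc2ns_scale = (1000000UL << NS_SCALE) / khz;
	}

	static inline unsigned long long cycles_2_ns(unsigned long long cyc)
	{
		return (cyc * cyc2ns_scale) >> NS_SCALE;
	}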

I suggest that set_cyc2ns_scale() be called prior to setup_irq(),
because cycles_2_ns() can be called from the timer ISR as soon as irq0
is enabled.
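
In sketch form (using the assumed cyc2ns_scale above), the window the
reordering closes looks like this:

	/* old ordering in time_init() */
	setup_irq(0, &irq0);		/* timer interrupts are now live */
	/* irq0 can fire here: main_timer_handler() calls cycles_2_ns()
	 * while cyc2ns_scale is still 0, so monotonic_base += 0 and the
	 * elapsed interval is silently dropped */
	set_cyc2ns_scale(cpu_khz);

Moving set_cyc2ns_scale() ahead of setup_irq(), as the hunk below does,
guarantees the scale is valid before the first tick is accounted.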

Signed-off-by: Toyo Abe <toyoa@mvista.com>
Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Dimitri Sivanich, 18 years ago
commit cbf9b4bb76

arch/x86_64/kernel/time.c: 4 additions, 7 deletions

@@ -276,6 +276,7 @@ static void set_rtc_mmss(unsigned long nowtime)
  *		Note: This function is required to return accurate
  *		time even in the absence of multiple timer ticks.
  */
+static inline unsigned long long cycles_2_ns(unsigned long long cyc);
 unsigned long long monotonic_clock(void)
 {
 	unsigned long seq;
@@ -300,8 +301,7 @@ unsigned long long monotonic_clock(void)
 			base = monotonic_base;
 		} while (read_seqretry(&xtime_lock, seq));
 		this_offset = get_cycles_sync();
-		/* FIXME: 1000 or 1000000? */
-		offset = (this_offset - last_offset)*1000 / cpu_khz;
+		offset = cycles_2_ns(this_offset - last_offset);
 	}
 	return base + offset;
 }
@@ -405,8 +405,7 @@ void main_timer_handler(struct pt_regs *regs)
 			offset %= USEC_PER_TICK;
 		}
 
-		/* FIXME: 1000 or 1000000? */
-		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
+		monotonic_base += cycles_2_ns(tsc - vxtime.last_tsc);
 
 		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
 
@@ -929,10 +928,8 @@ void __init time_init(void)
 	vxtime.quot = (USEC_PER_SEC << US_SCALE) / vxtime_hz;
 	vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
 	vxtime.last_tsc = get_cycles_sync();
-	setup_irq(0, &irq0);
-
 	set_cyc2ns_scale(cpu_khz);
-
+	setup_irq(0, &irq0);
 	hotcpu_notifier(time_cpu_notifier, 0);
 	time_cpu_notifier(NULL, CPU_ONLINE, (void *)(long)smp_processor_id());