@@ -60,12 +60,6 @@ static inline int check_tsc_unstable(void)
 	return tsc_unstable;
 }
 
-void mark_tsc_unstable(void)
-{
-	tsc_unstable = 1;
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
 /* Accelerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
  * basic equation:
@@ -295,7 +289,6 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
-static int tsc_update_callback(void);
 
 static cycle_t read_tsc(void)
 {
@@ -313,37 +306,28 @@ static struct clocksource clocksource_tsc = {
 	.mask			= CLOCKSOURCE_MASK(64),
 	.mult			= 0, /* to be set */
 	.shift			= 22,
-	.update_callback	= tsc_update_callback,
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
 };
 
-static int tsc_update_callback(void)
+void mark_tsc_unstable(void)
 {
-	int change = 0;
-
-	/* check to see if we should switch to the safe clocksource: */
-	if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
-		clocksource_change_rating(&clocksource_tsc, 0);
-		change = 1;
-	}
-
-	/* only update if tsc_khz has changed: */
-	if (current_tsc_khz != tsc_khz) {
-		current_tsc_khz = tsc_khz;
-		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
-							clocksource_tsc.shift);
-		change = 1;
+	if (!tsc_unstable) {
+		tsc_unstable = 1;
+		/* Can be called before registration */
+		if (clocksource_tsc.mult)
+			clocksource_change_rating(&clocksource_tsc, 0);
+		else
+			clocksource_tsc.rating = 0;
 	}
-
-	return change;
 }
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
 static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
 		       d->ident);
-	mark_tsc_unstable();
+	tsc_unstable = 1;
 	return 0;
 }
 
@@ -415,11 +399,12 @@ __cpuinit int unsynchronized_tsc(void)
 	 * Intel systems are normally all synchronized.
 	 * Exceptions must mark TSC as unstable:
 	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		return 0;
-
-	/* assume multi socket systems are not synchronized: */
-	return num_possible_cpus() > 1;
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+		/* assume multi socket systems are not synchronized: */
+		if (num_possible_cpus() > 1)
+			tsc_unstable = 1;
+	}
+	return tsc_unstable;
 }
 
 static int __init init_tsc_clocksource(void)
@@ -429,8 +414,7 @@ static int __init init_tsc_clocksource(void)
 	/* check blacklist */
 	dmi_check_system(bad_tsc_dmi_table);
 
-	if (unsynchronized_tsc()) /* mark unstable if unsynced */
-		mark_tsc_unstable();
+	unsynchronized_tsc();
 	current_tsc_khz = tsc_khz;
 	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
 						clocksource_tsc.shift);