@@ -10,6 +10,7 @@
 
 #include <asm/hpet.h>
 #include <asm/timex.h>
+#include <asm/timer.h>
 
 static int notsc __initdata = 0;
 
@@ -18,16 +19,48 @@ EXPORT_SYMBOL(cpu_khz);
 unsigned int tsc_khz;
 EXPORT_SYMBOL(tsc_khz);
 
-static unsigned int cyc2ns_scale __read_mostly;
+/* Accelerators for sched_clock()
+ * convert from cycles (64 bits) => nanoseconds (64 bits)
+ * basic equation:
+ *		ns = cycles / (freq / ns_per_sec)
+ *		ns = cycles * (ns_per_sec / freq)
+ *		ns = cycles * (10^9 / (cpu_khz * 10^3))
+ *		ns = cycles * (10^6 / cpu_khz)
+ *
+ *	Then we use scaling math (suggested by george@mvista.com) to get:
+ *		ns = cycles * (10^6 * SC / cpu_khz) / SC
+ *		ns = cycles * cyc2ns_scale / SC
+ *
+ *	And since SC is a constant power of two, we can convert the div
+ *	into a shift.
+ *
+ *	We can use a khz divisor instead of mhz to keep better precision,
+ *	since cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ *	(mathieu.desnoyers@polymtl.ca)
+ *
+ *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+DEFINE_PER_CPU(unsigned long, cyc2ns);
 
-static inline void set_cyc2ns_scale(unsigned long khz)
+static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
-	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
-}
+	unsigned long flags, prev_scale, *scale;
+	unsigned long long tsc_now, ns_now;
 
-static unsigned long long cycles_2_ns(unsigned long long cyc)
-{
-	return (cyc * cyc2ns_scale) >> NS_SCALE;
+	local_irq_save(flags);
+	sched_clock_idle_sleep_event();
+
+	scale = &per_cpu(cyc2ns, cpu);
+
+	rdtscll(tsc_now);
+	ns_now = __cycles_2_ns(tsc_now);
+
+	prev_scale = *scale;
+	if (cpu_khz)
+		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;
+
+	sched_clock_idle_wakeup_event(0);
+	local_irq_restore(flags);
 }
 
 unsigned long long sched_clock(void)
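For reference, the fixed-point math described in the new comment block can be checked in isolation. The following is a minimal user-space sketch, not kernel code: CYC2NS_SCALE_FACTOR = 10 is an assumption read off the 10^6 * 2^10 bound in the comment, and the 2 GHz frequency is invented for the example.

#include <stdio.h>

#define NSEC_PER_MSEC		1000000UL
#define CYC2NS_SCALE_FACTOR	10	/* SC = 2^10, assumed from the comment */

int main(void)
{
	unsigned long cpu_khz = 2000000;		/* hypothetical 2 GHz CPU */
	unsigned long scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;
	unsigned long long cycles = 2000000000ULL;	/* one second of cycles */

	/* ns = cycles * cyc2ns_scale / SC, with the divide folded into a shift */
	unsigned long long ns = (cycles * scale) >> CYC2NS_SCALE_FACTOR;

	printf("scale=%lu ns=%llu\n", scale, ns);
	return 0;
}

With these numbers, scale = (10^6 << 10) / (2 * 10^6) = 512, and 2 * 10^9 cycles convert to exactly 10^9 ns, as expected for a 2 GHz clock.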
@@ -100,7 +133,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		mark_tsc_unstable("cpufreq changes");
 	}
 
-	set_cyc2ns_scale(tsc_khz_ref);
+	preempt_disable();
+	set_cyc2ns_scale(tsc_khz_ref, smp_processor_id());
+	preempt_enable();
 
 	return 0;
 }
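A note on the preempt_disable()/preempt_enable() pair added here: smp_processor_id() is only stable while preemption is off, since the task could otherwise migrate to another CPU between the call and the per-CPU store. The notifier therefore pins itself to the current CPU just long enough to update that CPU's scale.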
@@ -151,7 +186,7 @@ static unsigned long __init tsc_read_refs(unsigned long *pm,
 void __init tsc_calibrate(void)
 {
 	unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2;
-	int hpet = is_hpet_enabled();
+	int hpet = is_hpet_enabled(), cpu;
 
 	local_irq_save(flags);
 
@@ -206,7 +241,9 @@ void __init tsc_calibrate(void)
 	}
 
 	tsc_khz = tsc2 / tsc1;
-	set_cyc2ns_scale(tsc_khz);
+
+	for_each_possible_cpu(cpu)
+		set_cyc2ns_scale(tsc_khz, cpu);
 }
 
 /*
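The calibration path now seeds every possible CPU rather than just the boot CPU, so CPUs brought online later already find a valid cyc2ns entry. Below is a user-space sketch of that flow, with a plain array standing in for the DEFINE_PER_CPU variable; NR_CPUS = 4 and the 2.4 GHz value are made up, and CYC2NS_SCALE_FACTOR = 10 is the same assumption as above.

#include <stdio.h>

#define NR_CPUS			4
#define NSEC_PER_MSEC		1000000UL
#define CYC2NS_SCALE_FACTOR	10

static unsigned long cyc2ns[NR_CPUS];	/* stand-in for the per-CPU variable */

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	if (cpu_khz)
		cyc2ns[cpu] = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

int main(void)
{
	unsigned long tsc_khz = 2400000;	/* hypothetical 2.4 GHz */
	int cpu;

	/* mirrors the for_each_possible_cpu() loop in tsc_calibrate() */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		set_cyc2ns_scale(tsc_khz, cpu);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: cyc2ns=%lu\n", cpu, cyc2ns[cpu]);
	return 0;
}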