tsc_64.c

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>
#include <linux/acpi_pmtmr.h>

#include <asm/hpet.h>
#include <asm/timex.h>
#include <asm/timer.h>

static int notsc __initdata = 0;

unsigned int cpu_khz;		/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *		ns = cycles / (freq / ns_per_sec)
 *		ns = cycles * (ns_per_sec / freq)
 *		ns = cycles * (10^9 / (cpu_khz * 10^3))
 *		ns = cycles * (10^6 / cpu_khz)
 *
 *	Then we use scaling math (suggested by george@mvista.com) to get:
 *		ns = cycles * (10^6 * SC / cpu_khz) / SC
 *		ns = cycles * cyc2ns_scale / SC
 *
 *	And since SC is a constant power of two, we can convert the div
 *	into a shift.
 *
 *	We can use khz divisor instead of mhz to keep a better precision, since
 *	cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 *	(mathieu.desnoyers@polymtl.ca)
 *
 *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
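
/*
 * Worked example (illustrative; assumes CYC2NS_SCALE_FACTOR is 10, as in
 * <asm/timer.h> of this era): for a 2 GHz CPU, cpu_khz = 2000000, so
 * cyc2ns_scale = (10^6 << 10) / 2000000 = 512, and
 * ns = (cycles * 512) >> 10 = cycles / 2, i.e. 0.5 ns per cycle.
 */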

DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
	unsigned long flags, prev_scale, *scale;
	unsigned long long tsc_now, ns_now;

	local_irq_save(flags);
	sched_clock_idle_sleep_event();

	scale = &per_cpu(cyc2ns, cpu);

	rdtscll(tsc_now);
	ns_now = __cycles_2_ns(tsc_now);

	prev_scale = *scale;
	if (cpu_khz)
		*scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR)/cpu_khz;

	sched_clock_idle_wakeup_event(0);
	local_irq_restore(flags);
}

unsigned long long native_sched_clock(void)
{
	unsigned long a = 0;

	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	 * which means it is not completely exact and may not be monotonic
	 * between CPUs. But the errors should be too small to matter for
	 * scheduling purposes.
	 */
	rdtscll(a);
	return cycles_2_ns(a);
}

/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
	return paravirt_sched_clock();
}
#else
unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

static int tsc_unstable;

int check_tsc_unstable(void)
{
	return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */
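
/*
 * Illustrative note: cpufreq_scale(old, div, mult) returns old * mult / div,
 * so with tsc_khz_ref = 2000000 (2 GHz) and a transition from
 * ref_freq = 2000000 kHz to freq->new = 1000000 kHz, tsc_khz is rescaled
 * to 1000000.
 */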

static unsigned int  ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}

	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj =
		cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	preempt_disable();
	set_cyc2ns_scale(tsc_khz_ref, smp_processor_id());
	preempt_enable();

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif

#define MAX_RETRIES	5
#define SMI_THRESHOLD	50000

/*
 * Read TSC and the reference counters. Take care of SMI disturbance
 */
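
/*
 * Descriptive note: if an SMI fires between the two get_cycles() reads
 * below, the TSC delta exceeds SMI_THRESHOLD and the sample is retried
 * (up to MAX_RETRIES); ULONG_MAX is returned when no undisturbed sample
 * could be taken.
 */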

static unsigned long __init tsc_read_refs(unsigned long *pm,
					  unsigned long *hpet)
{
	unsigned long t1, t2;
	int i;

	for (i = 0; i < MAX_RETRIES; i++) {
		t1 = get_cycles();
		if (hpet)
			*hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
		else
			*pm = acpi_pm_read_early();
		t2 = get_cycles();
		if ((t2 - t1) < SMI_THRESHOLD)
			return t2;
	}
	return ULONG_MAX;
}

/**
 * tsc_calibrate - calibrate the tsc on boot
 */
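
/*
 * Descriptive note: the TSC is calibrated by programming PIT channel 2
 * (ports 0x42/0x43, gated via port 0x61) as a one-shot 50 ms reference,
 * counting TSC cycles across that gate, and then cross-checking against
 * the HPET or ACPI PM timer when one of them is available.
 */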

void __init tsc_calibrate(void)
{
	unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2;
	int hpet = is_hpet_enabled(), cpu;

	local_irq_save(flags);

	tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);

	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	outb(0xb0, 0x43);
	outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	tr1 = get_cycles();
	while ((inb(0x61) & 0x20) == 0);
	tr2 = get_cycles();

	tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);

	local_irq_restore(flags);

	/*
	 * Preset the result with the raw and inaccurate PIT
	 * calibration value
	 */
	tsc_khz = (tr2 - tr1) / 50;

	/* hpet or pmtimer available ? */
	if (!hpet && !pm1 && !pm2) {
		printk(KERN_INFO "TSC calibrated against PIT\n");
		return;
	}

	/* Check, whether the sampling was disturbed by an SMI */
	if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) {
		printk(KERN_WARNING "TSC calibration disturbed by SMI, "
				    "using PIT calibration result\n");
		return;
	}
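
	/*
	 * Unit check (illustrative): tsc2 below becomes delta-cycles * 10^6,
	 * and tsc1 becomes the reference interval in ns (HPET_PERIOD is in
	 * femtoseconds, hence the /1000000), so tsc2 / tsc1 yields cycles
	 * per millisecond, i.e. kHz.
	 */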
	tsc2 = (tsc2 - tsc1) * 1000000L;

	if (hpet) {
		printk(KERN_INFO "TSC calibrated against HPET\n");
		if (hpet2 < hpet1)
			hpet2 += 0x100000000;
		hpet2 -= hpet1;
		tsc1 = (hpet2 * hpet_readl(HPET_PERIOD)) / 1000000;
	} else {
		printk(KERN_INFO "TSC calibrated against PM_TIMER\n");
		if (pm2 < pm1)
			pm2 += ACPI_PM_OVRRUN;
		pm2 -= pm1;
		tsc1 = (pm2 * 1000000000) / PMTMR_TICKS_PER_SEC;
	}

	tsc_khz = tsc2 / tsc1;

	for_each_possible_cpu(cpu)
		set_cyc2ns_scale(tsc_khz, cpu);
}

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
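
/*
 * Descriptive note: CPUs with X86_FEATURE_CONSTANT_TSC tick at a fixed
 * rate regardless of frequency scaling, so such a system is assumed
 * synchronized unless it is a clustered-APIC box; otherwise any
 * multi-CPU system is treated as unsynchronized.
 */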

__cpuinit int unsynchronized_tsc(void)
{
	if (tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif

	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
		return 0;

	/* Assume multi socket systems are not synchronized */
	return num_present_cpus() > 1;
}

int __init notsc_setup(char *s)
{
	notsc = 1;
	return 1;
}

__setup("notsc", notsc_setup);

/* clock source code: */
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles();
	return ret;
}

static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)vget_cycles();
	return ret;
}

static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.vread			= vread_tsc,
};
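
/*
 * Descriptive note: .mult is left zero here and filled in at registration
 * time by init_tsc_clocksource() below; the clocksource core then converts
 * cycles to nanoseconds as (cycles * mult) >> shift.
 */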

void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

void __init init_tsc_clocksource(void)
{
	if (!notsc) {
		clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
							clocksource_tsc.shift);
		if (check_tsc_unstable())
			clocksource_tsc.rating = 0;

		clocksource_register(&clocksource_tsc);
	}
}