/*
 * tsc_32.c - TSC (Time Stamp Counter) clocksource support for 32-bit x86.
 */
  1. #include <linux/sched.h>
  2. #include <linux/clocksource.h>
  3. #include <linux/workqueue.h>
  4. #include <linux/delay.h>
  5. #include <linux/cpufreq.h>
  6. #include <linux/jiffies.h>
  7. #include <linux/init.h>
  8. #include <linux/dmi.h>
  9. #include <linux/percpu.h>
  10. #include <asm/delay.h>
  11. #include <asm/tsc.h>
  12. #include <asm/io.h>
  13. #include <asm/timer.h>
  14. #include "mach_timer.h"
  15. extern int tsc_unstable;
  16. extern int tsc_disabled;
  17. /* clock source code */
  18. static struct clocksource clocksource_tsc;
/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU updated cycle_last under
 * xtime lock and the other CPU reads a TSC value which is smaller
 * than the cycle_last reference value due to a TSC which is slightly
 * behind. This delta is nowhere else observable, but in that case it
 * results in a forward time jump in the range of hours due to the
 * unsigned delta calculation of the time keeping core code, which is
 * necessary to support wrapping clocksources like pm timer.
 */
  30. static cycle_t read_tsc(void)
  31. {
  32. cycle_t ret;
  33. rdtscll(ret);
  34. return ret >= clocksource_tsc.cycle_last ?
  35. ret : clocksource_tsc.cycle_last;
  36. }
/*
 * TSC clocksource descriptor. .mult stays 0 until tsc_init() computes
 * it from the calibrated tsc_khz; mark_tsc_unstable() uses a zero
 * .mult to detect "not yet registered".
 */
static struct clocksource clocksource_tsc = {
	.name = "tsc",
	.rating = 300,			/* preferred over PIT/PM-timer when usable */
	.read = read_tsc,
	.mask = CLOCKSOURCE_MASK(64),	/* full 64-bit counter */
	.mult = 0, /* to be set */
	.shift = 22,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS |
		CLOCK_SOURCE_MUST_VERIFY,	/* watchdog-checked unless Geode clears it */
};
  47. void mark_tsc_unstable(char *reason)
  48. {
  49. if (!tsc_unstable) {
  50. tsc_unstable = 1;
  51. printk("Marking TSC unstable due to: %s.\n", reason);
  52. /* Can be called before registration */
  53. if (clocksource_tsc.mult)
  54. clocksource_change_rating(&clocksource_tsc, 0);
  55. else
  56. clocksource_tsc.rating = 0;
  57. }
  58. }
  59. EXPORT_SYMBOL_GPL(mark_tsc_unstable);
/*
 * DMI blacklist callback: a known-bad system matched, so flag the TSC
 * as unstable before tsc_init() registers the clocksource.
 * Returns 0 so dmi_check_system() continues scanning the table.
 */
static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
		d->ident);
	tsc_unstable = 1;
	return 0;
}
/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
	{
		.callback = dmi_mark_tsc_unstable,
		.ident = "IBM Thinkpad 380XD",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
			DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
		},
	},
	{}	/* terminating empty entry required by dmi_check_system() */
};
  79. /*
  80. * Make an educated guess if the TSC is trustworthy and synchronized
  81. * over all CPUs.
  82. */
  83. __cpuinit int unsynchronized_tsc(void)
  84. {
  85. if (!cpu_has_tsc || tsc_unstable)
  86. return 1;
  87. /* Anything with constant TSC should be synchronized */
  88. if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
  89. return 0;
  90. /*
  91. * Intel systems are normally all synchronized.
  92. * Exceptions must mark TSC as unstable:
  93. */
  94. if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
  95. /* assume multi socket systems are not synchronized: */
  96. if (num_possible_cpus() > 1)
  97. tsc_unstable = 1;
  98. }
  99. return tsc_unstable;
  100. }
/*
 * Geode_LX - the OLPC CPU possibly has a very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100
static void __init check_geode_tsc_reliable(void)
{
	unsigned long res_low, res_high;

	/*
	 * If RTSC_SUSP is set in the bus control MSR, the TSC keeps
	 * counting across suspend, so it does not need the clocksource
	 * watchdog to verify it.
	 */
	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
	if (res_low & RTSC_SUSP)
		clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif
  117. void __init tsc_init(void)
  118. {
  119. int cpu;
  120. u64 lpj;
  121. if (!cpu_has_tsc || tsc_disabled > 0)
  122. return;
  123. cpu_khz = calculate_cpu_khz();
  124. tsc_khz = cpu_khz;
  125. if (!cpu_khz) {
  126. mark_tsc_unstable("could not calculate TSC khz");
  127. return;
  128. }
  129. lpj = ((u64)tsc_khz * 1000);
  130. do_div(lpj, HZ);
  131. lpj_fine = lpj;
  132. /* now allow native_sched_clock() to use rdtsc */
  133. tsc_disabled = 0;
  134. printk("Detected %lu.%03lu MHz processor.\n",
  135. (unsigned long)cpu_khz / 1000,
  136. (unsigned long)cpu_khz % 1000);
  137. /*
  138. * Secondary CPUs do not run through tsc_init(), so set up
  139. * all the scale factors for all CPUs, assuming the same
  140. * speed as the bootup CPU. (cpufreq notifiers will fix this
  141. * up if their speed diverges)
  142. */
  143. for_each_possible_cpu(cpu)
  144. set_cyc2ns_scale(cpu_khz, cpu);
  145. use_tsc_delay();
  146. /* Check and install the TSC clocksource */
  147. dmi_check_system(bad_tsc_dmi_table);
  148. unsynchronized_tsc();
  149. check_geode_tsc_reliable();
  150. clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
  151. clocksource_tsc.shift);
  152. /* lower the rating if we already know its unstable: */
  153. if (check_tsc_unstable()) {
  154. clocksource_tsc.rating = 0;
  155. clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
  156. }
  157. clocksource_register(&clocksource_tsc);
  158. }