tsc_32.c

#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/workqueue.h>
#include <linux/cpufreq.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/percpu.h>

#include <asm/delay.h>
#include <asm/tsc.h>
#include <asm/io.h>
#include <asm/timer.h>

#include "mach_timer.h"

/*
 * native_sched_clock() is called before tsc_init(), so we must start
 * with the TSC soft disabled to prevent erroneous rdtsc usage on
 * !cpu_has_tsc processors.
 */
static int tsc_disabled = -1;

/*
 * On some systems the TSC frequency does not
 * change with the cpu frequency. So we need
 * an extra value to store the TSC freq
 */
unsigned int tsc_khz;
EXPORT_SYMBOL_GPL(tsc_khz);

#ifdef CONFIG_X86_TSC
static int __init tsc_setup(char *str)
{
        printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
               "cannot disable TSC completely.\n");
        tsc_disabled = 1;
        return 1;
}
#else
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
static int __init tsc_setup(char *str)
{
        setup_clear_cpu_cap(X86_FEATURE_TSC);
        return 1;
}
#endif

__setup("notsc", tsc_setup);
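/*
 * Usage note: booting with "notsc" on the kernel command line runs
 * tsc_setup() above. With CONFIG_X86_TSC the TSC can only be
 * soft-disabled (tsc_disabled = 1); without it, the TSC CPU
 * capability bit is cleared outright.
 */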
/*
 * code to mark and check if the TSC is unstable
 * due to cpufreq or due to unsynced TSCs
 */
static int tsc_unstable;

int check_tsc_unstable(void)
{
        return tsc_unstable;
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);

/* Accelerators for sched_clock()
 * convert from cycles(64bits) => nanoseconds (64bits)
 * basic equation:
 *              ns = cycles / (freq / ns_per_sec)
 *              ns = cycles * (ns_per_sec / freq)
 *              ns = cycles * (10^9 / (cpu_khz * 10^3))
 *              ns = cycles * (10^6 / cpu_khz)
 *
 * Then we use scaling math (suggested by george@mvista.com) to get:
 *              ns = cycles * (10^6 * SC / cpu_khz) / SC
 *              ns = cycles * cyc2ns_scale / SC
 *
 * And since SC is a constant power of two, we can convert the div
 * into a shift.
 *
 * We can use khz divisor instead of mhz to keep a better precision, since
 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
 * (mathieu.desnoyers@polymtl.ca)
 *
 *                      -johnstul@us.ibm.com "math is hard, lets go shopping!"
 */
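/*
 * Worked example of the scaling above, with illustrative numbers and
 * assuming CYC2NS_SCALE_FACTOR is 10 (SC = 2^10): for a 1 GHz CPU,
 * cpu_khz = 1000000, so cyc2ns_scale = (10^6 << 10) / 10^6 = 1024 and
 * ns = (cycles * 1024) >> 10 = cycles, i.e. exactly one nanosecond
 * per cycle, as expected at 1 GHz.
 */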
DEFINE_PER_CPU(unsigned long, cyc2ns);

static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
{
        unsigned long long tsc_now, ns_now;
        unsigned long flags, *scale;

        local_irq_save(flags);
        sched_clock_idle_sleep_event();

        scale = &per_cpu(cyc2ns, cpu);

        rdtscll(tsc_now);
        ns_now = __cycles_2_ns(tsc_now);

        if (cpu_khz)
                *scale = (NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) / cpu_khz;

        /*
         * Start smoothly with the new frequency:
         */
        sched_clock_idle_wakeup_event(0);
        local_irq_restore(flags);
}

/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long native_sched_clock(void)
{
        unsigned long long this_offset;

        /*
         * Fall back to jiffies if there's no TSC available:
         * ( But note that we still use it if the TSC is marked
         *   unstable. We do this because unlike Time Of Day,
         *   the scheduler clock tolerates small errors and it's
         *   very important for it to be as fast as the platform
         *   can achieve it. )
         */
        if (unlikely(tsc_disabled))
                /* No locking but a rare wrong value is not a big deal: */
                return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

        /* read the Time Stamp Counter: */
        rdtscll(this_offset);

        /* return the value in ns */
        return cycles_2_ns(this_offset);
}
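/*
 * Illustration of the jiffies fallback above, assuming HZ = 250: the
 * clock then advances by 1000000000 / 250 = 4000000 ns per jiffy, so
 * sched_clock() has 4 ms resolution until tsc_init() clears
 * tsc_disabled and the rdtsc path takes over.
 */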
/* We need to define a real function for sched_clock, to override the
   weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
        return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
        __attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
        unsigned long long start, end;
        unsigned long count;
        u64 delta64 = (u64)ULLONG_MAX;
        int i;
        unsigned long flags;

        local_irq_save(flags);

        /* run 3 times to ensure the cache is warm and to get an accurate reading */
        for (i = 0; i < 3; i++) {
                mach_prepare_counter();
                rdtscll(start);
                mach_countup(&count);
                rdtscll(end);

                /*
                 * Error: ECTCNEVERSET
                 * The CTC wasn't reliable: we got a hit on the very first read,
                 * or the CPU was so fast/slow that the quotient wouldn't fit in
                 * 32 bits..
                 */
                if (count <= 1)
                        continue;

                /* cpu freq too slow: */
                if ((end - start) <= CALIBRATE_TIME_MSEC)
                        continue;

                /*
                 * We want the minimum time of all runs in case one of them
                 * is inaccurate due to SMI or other delay
                 */
                delta64 = min(delta64, (end - start));
        }

        /* cpu freq too fast (or every run was bad): */
        if (delta64 > (1ULL << 32))
                goto err;

        delta64 += CALIBRATE_TIME_MSEC / 2;     /* round for do_div */
        do_div(delta64, CALIBRATE_TIME_MSEC);

        local_irq_restore(flags);
        return (unsigned long)delta64;
err:
        local_irq_restore(flags);
        return 0;
}
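/*
 * Sketch of the calibration arithmetic above, with illustrative
 * numbers and assuming CALIBRATE_TIME_MSEC is 30 (the mach_timer.h
 * default): if the TSC advances by 60000000 cycles while the CTC
 * counts down over the 30 ms window, do_div() yields
 * 60000000 / 30 = 2000000 kHz, i.e. a 2 GHz CPU.
 */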
int recalibrate_cpu_khz(void)
{
#ifndef CONFIG_SMP
        unsigned long cpu_khz_old = cpu_khz;

        if (cpu_has_tsc) {
                cpu_khz = calculate_cpu_khz();
                tsc_khz = cpu_khz;
                cpu_data(0).loops_per_jiffy =
                        cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                      cpu_khz_old, cpu_khz);
                return 0;
        } else
                return -ENODEV;
#else
        return -ENODEV;
#endif
}

EXPORT_SYMBOL(recalibrate_cpu_khz);

#ifdef CONFIG_CPU_FREQ

/*
 * if the CPU frequency is scaled, TSC-based delays will need a different
 * loops_per_jiffy value to function properly.
 */
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long cpu_khz_ref;

static int
time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
{
        struct cpufreq_freqs *freq = data;

        if (!ref_freq) {
                if (!freq->old) {
                        ref_freq = freq->new;
                        return 0;
                }
                ref_freq = freq->old;
                loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
                cpu_khz_ref = cpu_khz;
        }

        if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        cpu_data(freq->cpu).loops_per_jiffy =
                                cpufreq_scale(loops_per_jiffy_ref,
                                              ref_freq, freq->new);

                if (cpu_khz) {
                        if (num_online_cpus() == 1)
                                cpu_khz = cpufreq_scale(cpu_khz_ref,
                                                        ref_freq, freq->new);
                        if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
                                tsc_khz = cpu_khz;
                                set_cyc2ns_scale(cpu_khz, freq->cpu);
                                /*
                                 * TSC based sched_clock turns
                                 * to junk w/ cpufreq
                                 */
                                mark_tsc_unstable("cpufreq changes");
                        }
                }
        }

        return 0;
}
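/*
 * Illustration of the rescaling above: cpufreq_scale(old, div, mult)
 * computes old * mult / div, so with illustrative numbers a
 * loops_per_jiffy_ref of 4000000 measured at ref_freq = 2000000 kHz
 * becomes 4000000 * 1000000 / 2000000 = 2000000 when the CPU drops
 * to 1000000 kHz (1 GHz).
 */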
static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        return cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                         CPUFREQ_TRANSITION_NOTIFIER);
}
core_initcall(cpufreq_tsc);

#endif

/* clock source code */

static unsigned long current_tsc_khz;
static struct clocksource clocksource_tsc;

/*
 * We compare the TSC to the cycle_last value in the clocksource
 * structure to avoid a nasty time-warp issue. This can be observed in
 * a very small window right after one CPU updated cycle_last under
 * xtime lock and the other CPU reads a TSC value which is smaller
 * than the cycle_last reference value due to a TSC which is slightly
 * behind. This delta is nowhere else observable, but in that case it
 * results in a forward time jump in the range of hours due to the
 * unsigned delta calculation of the time keeping core code, which is
 * necessary to support wrapping clocksources like pm timer.
 */
static cycle_t read_tsc(void)
{
        cycle_t ret;

        rdtscll(ret);

        return ret >= clocksource_tsc.cycle_last ?
                ret : clocksource_tsc.cycle_last;
}

static struct clocksource clocksource_tsc = {
        .name           = "tsc",
        .rating         = 300,
        .read           = read_tsc,
        .mask           = CLOCKSOURCE_MASK(64),
        .mult           = 0,            /* to be set */
        .shift          = 22,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS |
                          CLOCK_SOURCE_MUST_VERIFY,
};
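/*
 * Illustration of the mult/shift pair: clocksource_khz2mult(), called
 * from tsc_init() below, sets mult to roughly (10^6 << shift) / tsc_khz.
 * Assuming a 1 GHz TSC (tsc_khz = 1000000) and shift = 22, mult is
 * exactly 2^22, so ns = (cycles * mult) >> shift = cycles.
 */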
void mark_tsc_unstable(char *reason)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                printk("Marking TSC unstable due to: %s.\n", reason);
                /* Can be called before registration */
                if (clocksource_tsc.mult)
                        clocksource_change_rating(&clocksource_tsc, 0);
                else
                        clocksource_tsc.rating = 0;
        }
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init dmi_mark_tsc_unstable(const struct dmi_system_id *d)
{
        printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
               d->ident);
        tsc_unstable = 1;
        return 0;
}

/* List of systems that have known TSC problems */
static struct dmi_system_id __initdata bad_tsc_dmi_table[] = {
        {
                .callback = dmi_mark_tsc_unstable,
                .ident = "IBM Thinkpad 380XD",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
                        DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
                },
        },
        {}
};

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
        if (!cpu_has_tsc || tsc_unstable)
                return 1;

        /* Anything with constant TSC should be synchronized */
        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                return 0;

        /*
         * Intel systems are normally all synchronized.
         * Exceptions must mark TSC as unstable:
         */
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
                /* assume multi socket systems are not synchronized: */
                if (num_possible_cpus() > 1)
                        tsc_unstable = 1;
        }
        return tsc_unstable;
}

/*
 * Geode_LX - the OLPC CPU has a possibly very reliable TSC
 */
#ifdef CONFIG_MGEODE_LX
/* RTSC counts during suspend */
#define RTSC_SUSP 0x100

static void __init check_geode_tsc_reliable(void)
{
        unsigned long res_low, res_high;

        rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
        if (res_low & RTSC_SUSP)
                clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
#else
static inline void check_geode_tsc_reliable(void) { }
#endif

void __init tsc_init(void)
{
        int cpu;

        if (!cpu_has_tsc || tsc_disabled > 0)
                return;

        cpu_khz = calculate_cpu_khz();
        tsc_khz = cpu_khz;

        if (!cpu_khz) {
                mark_tsc_unstable("could not calculate TSC khz");
                return;
        }

        /* now allow native_sched_clock() to use rdtsc */
        tsc_disabled = 0;

        printk("Detected %lu.%03lu MHz processor.\n",
               (unsigned long)cpu_khz / 1000,
               (unsigned long)cpu_khz % 1000);

        /*
         * Secondary CPUs do not run through tsc_init(), so set up
         * all the scale factors for all CPUs, assuming the same
         * speed as the bootup CPU. (cpufreq notifiers will fix this
         * up if their speed diverges)
         */
        for_each_possible_cpu(cpu)
                set_cyc2ns_scale(cpu_khz, cpu);

        use_tsc_delay();

        /* Check and install the TSC clocksource */
        dmi_check_system(bad_tsc_dmi_table);

        unsynchronized_tsc();
        check_geode_tsc_reliable();
        current_tsc_khz = tsc_khz;
        clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
                                                    clocksource_tsc.shift);
        /* lower the rating if we already know it's unstable: */
        if (check_tsc_unstable()) {
                clocksource_tsc.rating = 0;
                clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS;
        }

        clocksource_register(&clocksource_tsc);
}