tsc.c

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>

#include <asm/timex.h>

int notsc __initdata = 0;

unsigned int cpu_khz;		/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

/*
 * do_gettimeoffset() returns microseconds since the last timer interrupt was
 * triggered by hardware. A memory read of the HPET is slower than a register
 * read of the TSC, but much more reliable. It's also synchronized to the
 * timer interrupt. Note that do_gettimeoffset() may return more than
 * hpet_tick if a timer interrupt has happened already but vxtime.trigger
 * wasn't updated yet. This is not a problem, because jiffies hasn't updated
 * either. They are bound together by xtime_lock.
 */
unsigned int do_gettimeoffset_tsc(void)
{
	unsigned long t;
	unsigned long x;

	t = get_cycles_sync();
	if (t < vxtime.last_tsc)
		t = vxtime.last_tsc;	/* hack */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
	return x;
}
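
/*
 * Worked example of the fixed-point conversion above -- a sketch only,
 * assuming US_SCALE == 32 and a 2000 MHz TSC (cpu_khz == 2000000):
 *
 *	tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz
 *	         = (1000 << 32) / 2000000 ~= 2147483
 *
 * so a delta of 2000000 cycles (one millisecond at 2 GHz) gives
 *
 *	(2000000 * 2147483) >> 32 = 999 microseconds,
 *
 * i.e. the truncation error stays below one microsecond per tick.
 */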
static unsigned int cyc2ns_scale __read_mostly;

void set_cyc2ns_scale(unsigned long khz)
{
	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}

unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> NS_SCALE;
}
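
/*
 * Worked example of the cycles -> ns scale -- a sketch only, assuming
 * NS_SCALE == 10 and a 2000 MHz TSC (khz == 2000000):
 *
 *	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz
 *	             = (1000000 << 10) / 2000000 = 512
 *
 * so cycles_2_ns(cyc) = (cyc * 512) >> 10 = cyc / 2, matching the
 * 0.5 ns that a single cycle takes at 2 GHz.
 */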
unsigned long long sched_clock(void)
{
	unsigned long a = 0;

	/* Could do CPU core sync here. Opteron can execute rdtsc
	 * speculatively, which means it is not completely exact and may not
	 * be monotonic between CPUs. But the errors should be too small to
	 * matter for scheduling purposes.
	 */
	rdtscll(a);
	return cycles_2_ns(a);
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the CPU
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important, because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */
#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(struct work_struct *v)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}
/* If we notice lost ticks, schedule a call to cpufreq_get(), which verifies
 * that the CPU frequency the timing core thinks the CPU is running at is
 * still correct.
 */
void cpufreq_delayed_get(void)
{
	static int warned;

	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		if (!warned) {
			warned = 1;
			printk(KERN_DEBUG "Losing some ticks... "
				"checking if CPU frequency changed.\n");
		}
		schedule_work(&cpufreq_delayed_get_work);
	}
}
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;
static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;
	}
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
	}
	/* Rescale from the current frequency so sched_clock() keeps tracking
	 * the TSC rate after the transition. */
	set_cyc2ns_scale(cpu_khz);
	return 0;
}
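
/*
 * Worked example of the rescaling above -- a sketch only;
 * cpufreq_scale(old, div, mult) computes old * mult / div in fixed
 * point. If the box booted at ref_freq == 2200000 kHz (so
 * cpu_khz_ref == 2200000) and the governor drops the core to
 * freq->new == 1100000 kHz, cpu_khz becomes 1100000 and tsc_quot
 * doubles, so the half-rate TSC still yields correct microseconds
 * in do_gettimeoffset_tsc().
 */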
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif
static int tsc_unstable = 0;

void mark_tsc_unstable(void)
{
	tsc_unstable = 1;
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
/*
 * Make an educated guess whether the TSC is trustworthy and synchronized
 * across all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif
	/* Most Intel systems have synchronized TSCs except for
	   multi-node systems */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
		/* But the TSC doesn't tick in C3, so don't use it there */
		if (acpi_gbl_FADT.header.length > 0 &&
		    acpi_gbl_FADT.C3latency < 1000)
			return 1;
#endif
		return 0;
	}

	/* Assume multi-socket systems are not synchronized */
	return num_present_cpus() > 1;
}
int __init notsc_setup(char *s)
{
	notsc = 1;
	return 1;
}

__setup("notsc", notsc_setup);
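
/*
 * Usage note: __setup() registers notsc_setup() for the "notsc" kernel
 * command line option, so booting with "notsc" sets notsc = 1; the flag
 * is consumed by the timer setup code outside this file.
 */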