
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86:
  x86: fix 64-bit asm NOPS for CONFIG_GENERIC_CPU
  x86: fix call to set_cyc2ns_scale() from time_cpufreq_notifier()
  revert "x86: tsc prevent time going backwards"
Linus Torvalds 17 years ago
parent
commit
950b0d2837
3 changed files with 16 additions and 50 deletions
  1. arch/x86/kernel/tsc_32.c (+2, -17)
  2. arch/x86/kernel/tsc_64.c (+4, -23)
  3. include/asm-x86/nops.h (+10, -10)

+ 2 - 17
arch/x86/kernel/tsc_32.c

@@ -256,9 +256,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 						ref_freq, freq->new);
 			if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
 				tsc_khz = cpu_khz;
-				preempt_disable();
-				set_cyc2ns_scale(cpu_khz, smp_processor_id());
-				preempt_enable();
+				set_cyc2ns_scale(cpu_khz, freq->cpu);
 				/*
 				 * TSC based sched_clock turns
 				 * to junk w/ cpufreq
@@ -287,27 +285,14 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
-static struct clocksource clocksource_tsc;
 
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp issue. This can be observed in
- * a very small window right after one CPU updated cycle_last under
- * xtime lock and the other CPU reads a TSC value which is smaller
- * than the cycle_last reference value due to a TSC which is slighty
- * behind. This delta is nowhere else observable, but in that case it
- * results in a forward time jump in the range of hours due to the
- * unsigned delta calculation of the time keeping core code, which is
- * necessary to support wrapping clocksources like pm timer.
- */
 static cycle_t read_tsc(void)
 {
 	cycle_t ret;
 
 	rdtscll(ret);
 
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
+	return ret;
 }
 
 static struct clocksource clocksource_tsc = {

+ 4 - 23
arch/x86/kernel/tsc_64.c

@@ -11,7 +11,6 @@
 #include <asm/hpet.h>
 #include <asm/timex.h>
 #include <asm/timer.h>
-#include <asm/vgtod.h>
 
 static int notsc __initdata = 0;
 
@@ -149,9 +148,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 			mark_tsc_unstable("cpufreq changes");
 	}
 
-	preempt_disable();
-	set_cyc2ns_scale(tsc_khz_ref, smp_processor_id());
-	preempt_enable();
+	set_cyc2ns_scale(tsc_khz_ref, freq->cpu);
 
 	return 0;
 }
@@ -291,34 +288,18 @@ int __init notsc_setup(char *s)
 
 __setup("notsc", notsc_setup);
 
-static struct clocksource clocksource_tsc;
 
-/*
- * We compare the TSC to the cycle_last value in the clocksource
- * structure to avoid a nasty time-warp. This can be observed in a
- * very small window right after one CPU updated cycle_last under
- * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
- * is smaller than the cycle_last reference value due to a TSC which
- * is slighty behind. This delta is nowhere else observable, but in
- * that case it results in a forward time jump in the range of hours
- * due to the unsigned delta calculation of the time keeping core
- * code, which is necessary to support wrapping clocksources like pm
- * timer.
- */
+/* clock source code: */
 static cycle_t read_tsc(void)
 {
 	cycle_t ret = (cycle_t)get_cycles();
-
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
+	return ret;
 }
 
 static cycle_t __vsyscall_fn vread_tsc(void)
 {
 	cycle_t ret = (cycle_t)vget_cycles();
-
-	return ret >= __vsyscall_gtod_data.clock.cycle_last ?
-		ret : __vsyscall_gtod_data.clock.cycle_last;
+	return ret;
 }
 
 static struct clocksource clocksource_tsc = {

+ 10 - 10
include/asm-x86/nops.h

@@ -73,16 +73,7 @@
 #define P6_NOP7	".byte 0x0f,0x1f,0x80,0,0,0,0\n"
 #define P6_NOP8	".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
 
-#if defined(CONFIG_MK8)
-#define ASM_NOP1 K8_NOP1
-#define ASM_NOP2 K8_NOP2
-#define ASM_NOP3 K8_NOP3
-#define ASM_NOP4 K8_NOP4
-#define ASM_NOP5 K8_NOP5
-#define ASM_NOP6 K8_NOP6
-#define ASM_NOP7 K8_NOP7
-#define ASM_NOP8 K8_NOP8
-#elif defined(CONFIG_MK7)
+#if defined(CONFIG_MK7)
 #define ASM_NOP1 K7_NOP1
 #define ASM_NOP2 K7_NOP2
 #define ASM_NOP3 K7_NOP3
@@ -100,6 +91,15 @@
 #define ASM_NOP6 P6_NOP6
 #define ASM_NOP7 P6_NOP7
 #define ASM_NOP8 P6_NOP8
+#elif defined(CONFIG_X86_64)
+#define ASM_NOP1 K8_NOP1
+#define ASM_NOP2 K8_NOP2
+#define ASM_NOP3 K8_NOP3
+#define ASM_NOP4 K8_NOP4
+#define ASM_NOP5 K8_NOP5
+#define ASM_NOP6 K8_NOP6
+#define ASM_NOP7 K8_NOP7
+#define ASM_NOP8 K8_NOP8
 #else
 #define ASM_NOP1 GENERIC_NOP1
 #define ASM_NOP2 GENERIC_NOP2