
[PATCH] x86_64: Fix race in cpu_local_* on preemptible kernels

When a process changes CPUs while doing the non-atomic cpu_local_*
operations it might operate on the local_t of a different CPU.

Fix that by disabling preemption.

Pointed out by Christoph Lameter

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
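
For illustration, a minimal sketch of the race described above, assuming a hypothetical per-CPU counter my_counter (not part of the patch). The per-CPU address can be resolved on one CPU while the non-atomic update lands after a migration to another:

/* Hypothetical counter, declared elsewhere as:
 *   DEFINE_PER_CPU(local_t, my_counter);
 * Pre-patch expansion of cpu_local_inc(my_counter): */
local_inc(&__get_cpu_var(my_counter));
/* 1. __get_cpu_var(my_counter) resolves to CPU 0's copy.
 * 2. The task is preempted and migrated to CPU 1.
 * 3. local_inc() now does a non-atomic increment of CPU 0's
 *    local_t from CPU 1, racing with CPU 0's own local_*
 *    updates and potentially losing counts. */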
Andi Kleen, 19 years ago
commit da5311258d
2 files changed, 40 insertions(+), 12 deletions(-)
  1. include/asm-i386/local.h (+20 -6)
  2. include/asm-x86_64/local.h (+20 -6)

include/asm-i386/local.h (+20 -6)

@@ -55,12 +55,26 @@ static __inline__ void local_sub(long i, local_t *v)
  * much more efficient than these naive implementations.  Note they take
  * a variable, not an address.
  */
-#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+   still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(v)	 	\
+	({ local_t res__;		\
+	   preempt_disable(); 		\
+	   res__ = (v);			\
+	   preempt_enable();		\
+	   res__; })
+#define cpu_local_wrap(v)		\
+	({ preempt_disable();		\
+	   v;				\
+	   preempt_enable(); })		\
+
+#define cpu_local_read(v)    cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i)  cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v)     cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v)     cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v)  cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v)  cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
 
 
 #define __cpu_local_inc(v)	cpu_local_inc(v)
 #define __cpu_local_dec(v)	cpu_local_dec(v)
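
For reference, an illustrative expansion of the new wrappers (a sketch, not text from the patch). Both are GCC statement expressions; cpu_local_wrap() simply brackets the operation with preempt_disable()/preempt_enable():

/* Sketch: cpu_local_inc(v) after this patch effectively becomes */
({
	preempt_disable();              /* task cannot migrate from here... */
	local_inc(&__get_cpu_var(v));   /* address and increment hit the same CPU */
	preempt_enable();               /* ...to here */
})

cpu_local_wrap_v() is the value-returning variant used by cpu_local_read(): it copies the result into the temporary res__ while preemption is still disabled, and res__, as the last expression of the statement expression, becomes the value of the whole construct.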

include/asm-x86_64/local.h (+20 -6)

@@ -59,12 +59,26 @@ static inline void local_sub(long i, local_t *v)
  * This could be done better if we moved the per cpu data directly
  * after GS.
  */
-#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
-#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
-#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
-#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
-#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
-#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))
+
+/* Need to disable preemption for the cpu local counters otherwise we could
+   still access a variable of a previous CPU in a non atomic way. */
+#define cpu_local_wrap_v(v)	 	\
+	({ local_t res__;		\
+	   preempt_disable(); 		\
+	   res__ = (v);			\
+	   preempt_enable();		\
+	   res__; })
+#define cpu_local_wrap(v)		\
+	({ preempt_disable();		\
+	   v;				\
+	   preempt_enable(); })		\
+
+#define cpu_local_read(v)    cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
+#define cpu_local_set(v, i)  cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
+#define cpu_local_inc(v)     cpu_local_wrap(local_inc(&__get_cpu_var(v)))
+#define cpu_local_dec(v)     cpu_local_wrap(local_dec(&__get_cpu_var(v)))
+#define cpu_local_add(i, v)  cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
+#define cpu_local_sub(i, v)  cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
 
 
 #define __cpu_local_inc(v)	cpu_local_inc(v)
 #define __cpu_local_dec(v)	cpu_local_dec(v)