瀏覽代碼

[PATCH] x86_64: Use int operations in spinlocks to support more than 128 CPUs spinning.

Pointed out by Eric Dumazet

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Andi Kleen 19 年之前
父節點
當前提交
485832a5d9
共有 1 個文件被更改,包括 6 次插入和 6 次刪除
  1. 6 6
      include/asm-x86_64/spinlock.h

+ 6 - 6
include/asm-x86_64/spinlock.h

@@ -18,22 +18,22 @@
  */
 
 #define __raw_spin_is_locked(x) \
-		(*(volatile signed char *)(&(x)->slock) <= 0)
+		(*(volatile signed int *)(&(x)->slock) <= 0)
 
 #define __raw_spin_lock_string \
 	"\n1:\t" \
-	"lock ; decb %0\n\t" \
+	"lock ; decl %0\n\t" \
 	"js 2f\n" \
 	LOCK_SECTION_START("") \
 	"2:\t" \
 	"rep;nop\n\t" \
-	"cmpb $0,%0\n\t" \
+	"cmpl $0,%0\n\t" \
 	"jle 2b\n\t" \
 	"jmp 1b\n" \
 	LOCK_SECTION_END
 
 #define __raw_spin_unlock_string \
-	"movb $1,%0" \
+	"movl $1,%0" \
 		:"=m" (lock->slock) : : "memory"
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
@@ -47,10 +47,10 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
-	char oldval;
+	int oldval;
 
 	__asm__ __volatile__(
-		"xchgb %b0,%1"
+		"xchgl %0,%1"
 		:"=q" (oldval), "=m" (lock->slock)
 		:"0" (0) : "memory");