sh: Relax inline assembly constraints

When dereferencing the memory address contained in a register and
modifying the value at that memory address, the register should not be
listed in the inline asm outputs. The value at the memory address is an
output (which is taken care of with the "memory" clobber), not the register.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
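
As a minimal sketch of the corrected pattern (a hypothetical increment
helper in the same movli.l/movco.l style, not code from this commit):
the pointer register is a plain "r" input, only the scratch register is
an output, and the store through the pointer is what the "memory"
clobber accounts for.

static inline void atomic_inc_sketch(volatile unsigned int *p)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:					\n\t"
		"movli.l	@%1, %0			\n\t"	/* tmp = *p, open the reservation */
		"add		#1, %0			\n\t"	/* tmp += 1                       */
		"movco.l	%0, @%1			\n\t"	/* *p = tmp if still reserved     */
		"bf		1b			\n\t"	/* retry if the store failed      */
		: "=&z" (tmp)		/* scratch (R0) is the only register output */
		: "r" (p)		/* p is only read, so it is an input        */
		: "t", "memory"		/* *p changes: T bit and memory clobbered   */
	);
}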
Matt Fleming, 16 years ago
commit 42990701f9

2 changed files, 55 insertions(+), 55 deletions(-):

  1. arch/sh/include/asm/bitops-llsc.h (36 additions, 36 deletions)
  2. arch/sh/include/asm/cmpxchg-llsc.h (19 additions, 19 deletions)

arch/sh/include/asm/bitops-llsc.h (+36 -36)

@@ -1,7 +1,7 @@
 #ifndef __ASM_SH_BITOPS_LLSC_H
 #define __ASM_SH_BITOPS_LLSC_H
 
-static inline void set_bit(int nr, volatile void * addr)
+static inline void set_bit(int nr, volatile void *addr)
 {
 	int	mask;
 	volatile unsigned int *a = addr;
@@ -13,16 +13,16 @@ static inline void set_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		"1:						\n\t"
 		"movli.l	@%1, %0	! set_bit		\n\t"
-		"or		%3, %0				\n\t"
+		"or		%2, %0				\n\t"
 		"movco.l	%0, @%1				\n\t"
 		"bf		1b				\n\t"
-		: "=&z" (tmp), "=r" (a)
-		: "1" (a), "r" (mask)
+		: "=&z" (tmp)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 }
 
-static inline void clear_bit(int nr, volatile void * addr)
+static inline void clear_bit(int nr, volatile void *addr)
 {
 	int	mask;
 	volatile unsigned int *a = addr;
@@ -34,16 +34,16 @@ static inline void clear_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		"1:						\n\t"
 		"movli.l	@%1, %0	! clear_bit		\n\t"
-		"and		%3, %0				\n\t"
+		"and		%2, %0				\n\t"
 		"movco.l	%0, @%1				\n\t"
 		"bf		1b				\n\t"
-		: "=&z" (tmp), "=r" (a)
-		: "1" (a), "r" (~mask)
+		: "=&z" (tmp)
+		: "r" (a), "r" (~mask)
 		: "t", "memory"
 	);
 }
 
-static inline void change_bit(int nr, volatile void * addr)
+static inline void change_bit(int nr, volatile void *addr)
 {
 	int	mask;
 	volatile unsigned int *a = addr;
@@ -55,16 +55,16 @@ static inline void change_bit(int nr, volatile void * addr)
 	__asm__ __volatile__ (
 		"1:						\n\t"
 		"movli.l	@%1, %0	! change_bit		\n\t"
-		"xor		%3, %0				\n\t"
+		"xor		%2, %0				\n\t"
 		"movco.l	%0, @%1				\n\t"
 		"bf		1b				\n\t"
-		: "=&z" (tmp), "=r" (a)
-		: "1" (a), "r" (mask)
+		: "=&z" (tmp)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 }
 
-static inline int test_and_set_bit(int nr, volatile void * addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -75,21 +75,21 @@ static inline int test_and_set_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! test_and_set_bit	\n\t"
-		"mov		%0, %2				\n\t"
-		"or		%4, %0				\n\t"
-		"movco.l	%0, @%1				\n\t"
+		"movli.l	@%2, %0	! test_and_set_bit	\n\t"
+		"mov		%0, %1				\n\t"
+		"or		%3, %0				\n\t"
+		"movco.l	%0, @%2				\n\t"
 		"bf		1b				\n\t"
-		"and		%4, %2				\n\t"
-		: "=&z" (tmp), "=r" (a), "=&r" (retval)
-		: "1" (a), "r" (mask)
+		"and		%3, %1				\n\t"
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 
 	return retval != 0;
 }
 
-static inline int test_and_clear_bit(int nr, volatile void * addr)
+static inline int test_and_clear_bit(int nr, volatile void *addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -100,22 +100,22 @@ static inline int test_and_clear_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! test_and_clear_bit	\n\t"
-		"mov		%0, %2				\n\t"
-		"and		%5, %0				\n\t"
-		"movco.l	%0, @%1				\n\t"
+		"movli.l	@%2, %0	! test_and_clear_bit	\n\t"
+		"mov		%0, %1				\n\t"
+		"and		%4, %0				\n\t"
+		"movco.l	%0, @%2				\n\t"
 		"bf		1b				\n\t"
-		"and		%4, %2				\n\t"
+		"and		%3, %1				\n\t"
 		"synco						\n\t"
-		: "=&z" (tmp), "=r" (a), "=&r" (retval)
-		: "1" (a), "r" (mask), "r" (~mask)
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (a), "r" (mask), "r" (~mask)
 		: "t", "memory"
 	);
 
 	return retval != 0;
 }
 
-static inline int test_and_change_bit(int nr, volatile void * addr)
+static inline int test_and_change_bit(int nr, volatile void *addr)
 {
 	int	mask, retval;
 	volatile unsigned int *a = addr;
@@ -126,15 +126,15 @@ static inline int test_and_change_bit(int nr, volatile void * addr)
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! test_and_change_bit	\n\t"
-		"mov		%0, %2				\n\t"
-		"xor		%4, %0				\n\t"
-		"movco.l	%0, @%1				\n\t"
+		"movli.l	@%2, %0	! test_and_change_bit	\n\t"
+		"mov		%0, %1				\n\t"
+		"xor		%3, %0				\n\t"
+		"movco.l	%0, @%2				\n\t"
 		"bf		1b				\n\t"
-		"and		%4, %2				\n\t"
+		"and		%3, %1				\n\t"
 		"synco						\n\t"
-		: "=&z" (tmp), "=r" (a), "=&r" (retval)
-		: "1" (a), "r" (mask)
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (a), "r" (mask)
 		: "t", "memory"
 	);
 

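With the register output dropped, every operand number after it shifts
down by one. For test_and_set_bit above, the new numbering works out as
follows (derived from the hunk, shown here for easy cross-checking):

	%0  "=&z" (tmp)		/* R0 scratch               */
	%1  "=&r" (retval)	/* old value of the word    */
	%2  "r"   (a)		/* word address, input only */
	%3  "r"   (mask)	/* bit mask, input only     */
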
arch/sh/include/asm/cmpxchg-llsc.h (+19 -19)

@@ -8,14 +8,14 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
 
 	__asm__ __volatile__ (
 		"1:					\n\t"
-		"movli.l	@%1, %0	! xchg_u32	\n\t"
-		"mov		%0, %2			\n\t"
-		"mov		%4, %0			\n\t"
-		"movco.l	%0, @%1			\n\t"
+		"movli.l	@%2, %0	! xchg_u32	\n\t"
+		"mov		%0, %1			\n\t"
+		"mov		%3, %0			\n\t"
+		"movco.l	%0, @%2			\n\t"
 		"bf		1b			\n\t"
 		"synco					\n\t"
-		: "=&z"(tmp), "=r" (m), "=&r" (retval)
-		: "1" (m), "r" (val)
+		: "=&z"(tmp), "=&r" (retval)
+		: "r" (m), "r" (val)
 		: "t", "memory"
 	);
 
@@ -29,14 +29,14 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
 
 	__asm__ __volatile__ (
 		"1:					\n\t"
-		"movli.l	@%1, %0	! xchg_u8	\n\t"
-		"mov		%0, %2			\n\t"
-		"mov		%4, %0			\n\t"
-		"movco.l	%0, @%1			\n\t"
+		"movli.l	@%2, %0	! xchg_u8	\n\t"
+		"mov		%0, %1			\n\t"
+		"mov		%3, %0			\n\t"
+		"movco.l	%0, @%2			\n\t"
 		"bf		1b			\n\t"
 		"synco					\n\t"
-		: "=&z"(tmp), "=r" (m), "=&r" (retval)
-		: "1" (m), "r" (val & 0xff)
+		: "=&z"(tmp), "=&r" (retval)
+		: "r" (m), "r" (val & 0xff)
 		: "t", "memory"
 	);
 
@@ -51,17 +51,17 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new)
 
 	__asm__ __volatile__ (
 		"1:						\n\t"
-		"movli.l	@%1, %0	! __cmpxchg_u32		\n\t"
-		"mov		%0, %2				\n\t"
-		"cmp/eq		%2, %4				\n\t"
+		"movli.l	@%2, %0	! __cmpxchg_u32		\n\t"
+		"mov		%0, %1				\n\t"
+		"cmp/eq		%1, %3				\n\t"
 		"bf		2f				\n\t"
-		"mov		%5, %0				\n\t"
+		"mov		%3, %0				\n\t"
 		"2:						\n\t"
-		"movco.l	%0, @%1				\n\t"
+		"movco.l	%0, @%2				\n\t"
 		"bf		1b				\n\t"
 		"synco						\n\t"
-		: "=&z" (tmp), "=r" (m), "=&r" (retval)
-		: "1" (m), "r" (old), "r" (new)
+		: "=&z" (tmp), "=&r" (retval)
+		: "r" (m), "r" (old), "r" (new)
 		: "t", "memory"
 	);