sh: Provide movli.l/movco.l-based bitops.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt 16 years ago
parent
commit
742fd1bcfb
2 changed files with 146 additions and 0 deletions
  1. arch/sh/include/asm/bitops-llsc.h: 144 additions, 0 deletions
  2. arch/sh/include/asm/bitops.h: 2 additions, 0 deletions
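
For readers unfamiliar with the instruction pair: movli.l/movco.l are SH-4A's load-linked/store-conditional primitives. movli.l loads a word into R0 (the "z" constraint in the asm below) and opens a reservation on it; movco.l stores R0 back only if the reservation is still intact, recording success in the T bit; "bf 1b" branches back and retries when the store was lost to another CPU. A minimal C sketch of that retry loop, using GCC's __atomic builtins rather than the real instructions (sketch_set_bit and the builtin-based formulation are illustrative, not part of the patch):

static inline void sketch_set_bit(int nr, volatile unsigned int *addr)
{
	volatile unsigned int *a = addr + (nr >> 5);	/* 32-bit word holding the bit */
	unsigned int mask = 1u << (nr & 0x1f);		/* bit's position within that word */
	unsigned int old = *a;				/* movli.l: load, open reservation */

	/* movco.l + bf 1b: store only if nobody wrote the word in
	 * between; on failure old is refreshed and we go around again. */
	while (!__atomic_compare_exchange_n(a, &old, old | mask,
					    1, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;
}

Relaxed ordering suffices for the sketch because, as in the patch, ordering is handled separately: the asm uses a "memory" clobber, and two of the test_and_* routines add an explicit synco barrier.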

+ 144 - 0
arch/sh/include/asm/bitops-llsc.h

@@ -0,0 +1,144 @@
+#ifndef __ASM_SH_BITOPS_LLSC_H
+#define __ASM_SH_BITOPS_LLSC_H
+
+static inline void set_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! set_bit		\n\t"
+		"or		%3, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp), "=r" (a)
+		: "1" (a), "r" (mask)
+		: "t", "memory"
+	);
+}
+
+static inline void clear_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! clear_bit		\n\t"
+		"and		%3, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp), "=r" (a)
+		: "1" (a), "r" (~mask)
+		: "t", "memory"
+	);
+}
+
+static inline void change_bit(int nr, volatile void * addr)
+{
+	int	mask;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! change_bit		\n\t"
+		"xor		%3, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		: "=&z" (tmp), "=r" (a)
+		: "1" (a), "r" (mask)
+		: "t", "memory"
+	);
+}
+
+static inline int test_and_set_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! test_and_set_bit	\n\t"
+		"mov		%0, %2				\n\t"
+		"or		%4, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		"and		%4, %2				\n\t"
+		: "=&z" (tmp), "=r" (a), "=&r" (retval)
+		: "1" (a), "r" (mask)
+		: "t", "memory"
+	);
+
+	return retval != 0;
+}
+
+static inline int test_and_clear_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! test_and_clear_bit	\n\t"
+		"mov		%0, %2				\n\t"
+		"and		%5, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		"and		%4, %2				\n\t"
+		"synco						\n\t"
+		: "=&z" (tmp), "=r" (a), "=&r" (retval)
+		: "1" (a), "r" (mask), "r" (~mask)
+		: "t", "memory"
+	);
+
+	return retval != 0;
+}
+
+static inline int test_and_change_bit(int nr, volatile void * addr)
+{
+	int	mask, retval;
+	volatile unsigned int *a = addr;
+	unsigned long tmp;
+
+	a += nr >> 5;
+	mask = 1 << (nr & 0x1f);
+
+	__asm__ __volatile__ (
+		"1:						\n\t"
+		"movli.l	@%1, %0	! test_and_change_bit	\n\t"
+		"mov		%0, %2				\n\t"
+		"xor		%4, %0				\n\t"
+		"movco.l	%0, @%1				\n\t"
+		"bf		1b				\n\t"
+		"and		%4, %2				\n\t"
+		"synco						\n\t"
+		: "=&z" (tmp), "=r" (a), "=&r" (retval)
+		: "1" (a), "r" (mask)
+		: "t", "memory"
+	);
+
+	return retval != 0;
+}
+
+#endif /* __ASM_SH_BITOPS_LLSC_H */
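
The plain set/clear/change routines return nothing; the test_and_* variants also hand back the bit's previous value, which is what makes them usable as one-shot guards or tiny trylocks. A hypothetical caller, purely for illustration (init_done, maybe_init, and do_expensive_init are made-up names):

/* Bit 0 of init_done acts as a "done once" flag: test_and_set_bit()
 * atomically sets it and reports whether another CPU got there first. */
static unsigned long init_done;

static void maybe_init(void)
{
	if (!test_and_set_bit(0, &init_done))
		do_expensive_init();	/* runs in exactly one caller */
}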

+ 2 - 0
arch/sh/include/asm/bitops.h

@@ -13,6 +13,8 @@
 
 #ifdef CONFIG_GUSA_RB
 #include <asm/bitops-grb.h>
+#elif defined(CONFIG_CPU_SH4A)
+#include <asm/bitops-llsc.h>
 #else
 #include <asm/bitops-irq.h>
 #endif
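
This hunk slots the new header into the existing compile-time backend selection: CONFIG_GUSA_RB kernels keep the gUSA register-bank rollback implementation, SH-4A kernels now pick up the movli.l/movco.l versions above, and everything else falls back to the IRQ-masking variants in bitops-irq.h. Callers are unaffected by the choice; a hypothetical smoke test that behaves identically on all three backends (illustrative only, not from the patch):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>

static int __init bitops_smoke_test(void)
{
	static unsigned long word;		/* all bits start clear */

	set_bit(3, &word);			/* word == 0x08 */
	change_bit(4, &word);			/* word == 0x18 */
	if (!test_and_set_bit(3, &word))	/* bit already set: expect 1 */
		return -EINVAL;
	if (!test_and_clear_bit(3, &word))	/* was set: expect 1, now cleared */
		return -EINVAL;
	clear_bit(4, &word);			/* word == 0 again */
	return test_bit(4, &word) ? -EINVAL : 0;
}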