
Merge branch 'v6v7' into devel

Conflicts:
	arch/arm/include/asm/cacheflush.h
	arch/arm/include/asm/proc-fns.h
	arch/arm/mm/Kconfig
Russell King 14 years ago
parent
commit
bd1274dc00

+ 8 - 6
arch/arm/Kconfig

@@ -7,7 +7,7 @@ config ARM
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
-	select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
+	select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
@@ -24,7 +24,7 @@ config ARM
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_REGS_AND_STACK_ACCESS_API
-	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
+	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 	select HAVE_C_RECORDMCOUNT
 	select HAVE_GENERIC_HARDIRQS
 	select HAVE_SPARSE_IRQ
@@ -456,6 +456,7 @@ config ARCH_IXP4XX
 
 config ARCH_DOVE
 	bool "Marvell Dove"
+	select CPU_V6K
 	select PCI
 	select ARCH_REQUIRE_GPIOLIB
 	select GENERIC_CLOCKEVENTS
@@ -1059,7 +1060,7 @@ config XSCALE_PMU
 	default y
 
 config CPU_HAS_PMU
-	depends on (CPU_V6 || CPU_V7 || XSCALE_PMU) && \
+	depends on (CPU_V6 || CPU_V6K || CPU_V7 || XSCALE_PMU) && \
 		   (!ARCH_OMAP3 || OMAP3_EMU)
 	default y
 	bool
@@ -1075,7 +1076,7 @@ endif
 
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
-	depends on CPU_V6
+	depends on CPU_V6 || CPU_V6K
 	help
 	  Invalidation of the Instruction Cache operation can
 	  fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.
@@ -1318,6 +1319,7 @@ source "kernel/time/Kconfig"
 config SMP
 	bool "Symmetric Multi-Processing (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
+	depends on CPU_V6K || CPU_V7
 	depends on GENERIC_CLOCKEVENTS
 	depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
 		 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
@@ -1429,7 +1431,7 @@ config HZ
 
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
-	depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
+	depends on CPU_V7 && !CPU_V6 && !CPU_V6K && EXPERIMENTAL
 	select AEABI
 	select ARM_ASM_UNIFIED
 	help
@@ -1963,7 +1965,7 @@ config FPE_FASTFPE
 
 config VFP
 	bool "VFP-format floating point maths"
-	depends on CPU_V6 || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
+	depends on CPU_V6 || CPU_V6K || CPU_ARM926T || CPU_V7 || CPU_FEROCEON
 	help
 	  Say Y to include VFP support code in the kernel. This is needed
 	  if your hardware includes a VFP unit.
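
The GENERIC_ATOMIC64 change above is driven by the instruction set: the 64-bit exclusive pair ldrexd/strexd only exists from ARMv6K onwards, so a kernel that may run on a plain V6 core has to fall back to the generic, spinlock-based atomic64. A minimal sketch of the native ARMv6K+ path that plain V6 lacks (modelled on the kernel's atomic64 helpers; illustrative, not part of this diff):

	static inline void atomic64_add(u64 i, atomic64_t *v)
	{
		u64 result;
		unsigned long tmp;

		__asm__ __volatile__("@ atomic64_add\n"
	"1:	ldrexd	%0, %H0, [%2]\n"	/* load-exclusive 64-bit pair (ARMv6K+) */
	"	adds	%0, %0, %3\n"
	"	adc	%H0, %H0, %H3\n"
	"	strexd	%1, %0, %H0, [%2]\n"	/* fails (tmp != 0) if the monitor was lost */
	"	teq	%1, #0\n"
	"	bne	1b"
		: "=&r" (result), "=&r" (tmp)
		: "r" (&v->counter), "r" (i)
		: "cc");
	}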

+ 1 - 0
arch/arm/Makefile

@@ -89,6 +89,7 @@ tune-$(CONFIG_CPU_XSCALE)	:=$(call cc-option,-mtune=xscale,-mtune=strongarm110)
 tune-$(CONFIG_CPU_XSC3)		:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
 tune-$(CONFIG_CPU_FEROCEON)	:=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
 tune-$(CONFIG_CPU_V6)		:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_V6K)		:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
 
 ifeq ($(CONFIG_AEABI),y)
 CFLAGS_ABI	:=-mabi=aapcs-linux -mno-thumb-interwork

+ 1 - 1
arch/arm/boot/compressed/head.S

@@ -21,7 +21,7 @@
 
 #if defined(CONFIG_DEBUG_ICEDCC)
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 		.macro	loadsp, rb, tmp
 		.endm
 		.macro	writeb, ch, rb

+ 1 - 1
arch/arm/boot/compressed/misc.c

@@ -36,7 +36,7 @@ extern void error(char *x);
 
 #ifdef CONFIG_DEBUG_ICEDCC
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 
 static void icedcc_putc(int ch)
 {
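
The hunk stops at the function opening; for reference, the ARMv6-class DCC variant of icedcc_putc continues roughly as below, spinning until the CP14 debug status register reports the write channel free (reconstructed for illustration):

	static void icedcc_putc(int ch)
	{
		int status, i = 0x4000000;

		do {
			if (--i < 0)
				return;		/* give up rather than hang with no debugger attached */

			/* CP14 debug status; bit 29 = DCC write register still full */
			asm volatile ("mrc p14, 0, %0, c0, c1, 0" : "=r" (status));
		} while (status & (1 << 29));

		/* write the character into the DCC transmit register */
		asm volatile ("mcr p14, 0, %0, c0, c5, 0" : : "r" (ch));
	}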

+ 22 - 38
arch/arm/include/asm/bitops.h

@@ -148,15 +148,19 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
  * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
  */
 
+/*
+ * Native endian assembly bitops.  nr = 0 -> word 0 bit 0.
+ */
+extern void _set_bit(int nr, volatile unsigned long * p);
+extern void _clear_bit(int nr, volatile unsigned long * p);
+extern void _change_bit(int nr, volatile unsigned long * p);
+extern int _test_and_set_bit(int nr, volatile unsigned long * p);
+extern int _test_and_clear_bit(int nr, volatile unsigned long * p);
+extern int _test_and_change_bit(int nr, volatile unsigned long * p);
+
 /*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  */
-extern void _set_bit_le(int nr, volatile unsigned long * p);
-extern void _clear_bit_le(int nr, volatile unsigned long * p);
-extern void _change_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_le(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_le(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_le(const void * p, unsigned size);
 extern int _find_next_zero_bit_le(const void * p, int size, int offset);
 extern int _find_first_bit_le(const unsigned long *p, unsigned size);
@@ -165,12 +169,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
 /*
  * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
  */
-extern void _set_bit_be(int nr, volatile unsigned long * p);
-extern void _clear_bit_be(int nr, volatile unsigned long * p);
-extern void _change_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_set_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_clear_bit_be(int nr, volatile unsigned long * p);
-extern int _test_and_change_bit_be(int nr, volatile unsigned long * p);
 extern int _find_first_zero_bit_be(const void * p, unsigned size);
 extern int _find_next_zero_bit_be(const void * p, int size, int offset);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
@@ -180,33 +178,26 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 /*
  * The __* form of bitops are non-atomic and may be reordered.
  */
-#define	ATOMIC_BITOP_LE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_le(nr,p))
-
-#define	ATOMIC_BITOP_BE(name,nr,p)		\
-	(__builtin_constant_p(nr) ?		\
-	 ____atomic_##name(nr, p) :		\
-	 _##name##_be(nr,p))
+#define ATOMIC_BITOP(name,nr,p)			\
+	(__builtin_constant_p(nr) ? ____atomic_##name(nr, p) : _##name(nr,p))
 #else
-#define ATOMIC_BITOP_LE(name,nr,p)	_##name##_le(nr,p)
-#define ATOMIC_BITOP_BE(name,nr,p)	_##name##_be(nr,p)
+#define ATOMIC_BITOP(name,nr,p)		_##name(nr,p)
 #endif
 
-#define NONATOMIC_BITOP(name,nr,p)		\
-	(____nonatomic_##name(nr, p))
+/*
+ * Native endian atomic definitions.
+ */
+#define set_bit(nr,p)			ATOMIC_BITOP(set_bit,nr,p)
+#define clear_bit(nr,p)			ATOMIC_BITOP(clear_bit,nr,p)
+#define change_bit(nr,p)		ATOMIC_BITOP(change_bit,nr,p)
+#define test_and_set_bit(nr,p)		ATOMIC_BITOP(test_and_set_bit,nr,p)
+#define test_and_clear_bit(nr,p)	ATOMIC_BITOP(test_and_clear_bit,nr,p)
+#define test_and_change_bit(nr,p)	ATOMIC_BITOP(test_and_change_bit,nr,p)
 
 #ifndef __ARMEB__
 /*
  * These are the little endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_LE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_LE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_LE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_le(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_le(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_le(p,sz)
@@ -215,16 +206,9 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #define WORD_BITOFF_TO_LE(x)		((x))
 
 #else
-
 /*
  * These are the big endian, atomic definitions.
  */
-#define set_bit(nr,p)			ATOMIC_BITOP_BE(set_bit,nr,p)
-#define clear_bit(nr,p)			ATOMIC_BITOP_BE(clear_bit,nr,p)
-#define change_bit(nr,p)		ATOMIC_BITOP_BE(change_bit,nr,p)
-#define test_and_set_bit(nr,p)		ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
-#define test_and_clear_bit(nr,p)	ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
-#define test_and_change_bit(nr,p)	ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
 #define find_first_zero_bit(p,sz)	_find_first_zero_bit_be(p,sz)
 #define find_next_zero_bit(p,sz,off)	_find_next_zero_bit_be(p,sz,off)
 #define find_first_bit(p,sz)		_find_first_bit_be(p,sz)
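
With the _le/_be pairs folded into native-endian functions, every atomic bitop now funnels through the single ATOMIC_BITOP macro above: a compile-time-constant bit number is inlined via the ____atomic_* helpers, anything else calls the out-of-line assembly. In use (sketch; hypothetical caller):

	static unsigned long flags_word;

	static int bitops_demo(int nr)
	{
		set_bit(0, &flags_word);	/* constant nr: inlined ____atomic_set_bit() */
		set_bit(nr, &flags_word);	/* variable nr: out-of-line _set_bit() */

		/* returns the old value of the bit, atomically clearing it */
		return test_and_clear_bit(nr, &flags_word);
	}

Only the find_*_bit helpers keep separate _le/_be implementations, since their bit numbering is genuinely byte-order dependent.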

+ 2 - 1
arch/arm/include/asm/cacheflush.h

@@ -187,7 +187,8 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
  * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
  * will fall through to use __flush_icache_all_generic.
  */
-#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) ||		\
+#if (defined(CONFIG_CPU_V7) && \
+     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
 	defined(CONFIG_SMP_ON_UP)
 #define __flush_icache_preferred	__cpuc_flush_icache_all
 #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)

+ 1 - 1
arch/arm/include/asm/glue-cache.h

@@ -109,7 +109,7 @@
 # define MULTI_CACHE 1
 #endif
 
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 //# ifdef _CACHE
 #  define MULTI_CACHE 1
 //# else

+ 1 - 1
arch/arm/include/asm/glue-proc.h

@@ -230,7 +230,7 @@
 # endif
 #endif
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 # ifdef CPU_NAME
 #  undef  MULTI_CPU
 #  define MULTI_CPU

+ 41 - 12
arch/arm/include/asm/spinlock.h

@@ -5,17 +5,52 @@
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
+/*
+ * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
+ * extensions, so when running on UP, we have to patch these instructions away.
+ */
+#define ALT_SMP(smp, up)					\
+	"9998:	" smp "\n"					\
+	"	.pushsection \".alt.smp.init\", \"a\"\n"	\
+	"	.long	9998b\n"				\
+	"	" up "\n"					\
+	"	.popsection\n"
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define SEV		ALT_SMP("sev.w", "nop.w")
+/*
+ * For Thumb-2, special care is needed to ensure that the conditional WFE
+ * instruction really does assemble to exactly 4 bytes (as required by
+ * the SMP_ON_UP fixup code).   By itself "wfene" might cause the
+ * assembler to insert an extra (16-bit) IT instruction, depending on the
+ * presence or absence of neighbouring conditional instructions.
+ *
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
+ * the assembler won't change IT instructions which are explicitly present
+ * in the input.
+ */
+#define WFE(cond)	ALT_SMP(		\
+	"it " cond "\n\t"			\
+	"wfe" cond ".n",			\
+						\
+	"nop.w"					\
+)
+#else
+#define SEV		ALT_SMP("sev", "nop")
+#define WFE(cond)	ALT_SMP("wfe" cond, "nop")
+#endif
+
 static inline void dsb_sev(void)
 {
 #if __LINUX_ARM_ARCH__ >= 7
 	__asm__ __volatile__ (
 		"dsb\n"
-		"sev"
+		SEV
 	);
-#elif defined(CONFIG_CPU_32v6K)
+#else
 	__asm__ __volatile__ (
 		"mcr p15, 0, %0, c7, c10, 4\n"
-		"sev"
+		SEV
 		: : "r" (0)
 	);
 #endif
@@ -46,9 +81,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teqeq	%0, #0\n"
 "	bne	1b"
@@ -107,9 +140,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%1]\n"
 "	teq	%0, #0\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfene\n"
-#endif
+	WFE("ne")
 "	strexeq	%0, %2, [%1]\n"
 "	teq	%0, #0\n"
 "	bne	1b"
@@ -176,9 +207,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 "1:	ldrex	%0, [%2]\n"
 "	adds	%0, %0, #1\n"
 "	strexpl	%1, %0, [%2]\n"
-#ifdef CONFIG_CPU_32v6K
-"	wfemi\n"
-#endif
+	WFE("mi")
 "	rsbpls	%0, %1, #0\n"
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)

+ 9 - 8
arch/arm/include/asm/system.h

@@ -347,6 +347,7 @@ void cpu_idle_wait(void);
 #include <asm-generic/cmpxchg-local.h>
 
 #if __LINUX_ARM_ARCH__ < 6
+/* min ARCH < ARMv6 */
 
 #ifdef CONFIG_SMP
 #error "SMP is not supported on this platform"
@@ -365,7 +366,7 @@ void cpu_idle_wait(void);
 #include <asm-generic/cmpxchg.h>
 #endif
 
-#else	/* __LINUX_ARM_ARCH__ >= 6 */
+#else	/* min ARCH >= ARMv6 */
 
 extern void __bad_cmpxchg(volatile void *ptr, int size);
 
@@ -379,7 +380,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	unsigned long oldval, res;
 
 	switch (size) {
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 	case 1:
 		do {
 			asm volatile("@ __cmpxchg1\n"
@@ -404,7 +405,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 				: "memory", "cc");
 		} while (res);
 		break;
-#endif /* CONFIG_CPU_32v6K */
+#endif
 	case 4:
 		do {
 			asm volatile("@ __cmpxchg4\n"
@@ -450,12 +451,12 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	unsigned long ret;
 
 	switch (size) {
-#ifndef CONFIG_CPU_32v6K
+#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
 	case 1:
 	case 2:
 		ret = __cmpxchg_local_generic(ptr, old, new, size);
 		break;
-#endif	/* !CONFIG_CPU_32v6K */
+#endif
 	default:
 		ret = __cmpxchg(ptr, old, new, size);
 	}
@@ -469,7 +470,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 				       (unsigned long)(n),		\
 				       sizeof(*(ptr))))
 
-#ifdef CONFIG_CPU_32v6K
+#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
 
 /*
  * Note : ARMv7-M (currently unsupported by Linux) does not support
@@ -524,11 +525,11 @@ static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
 					 (unsigned long long)(o),	\
 					 (unsigned long long)(n)))
 
-#else	/* !CONFIG_CPU_32v6K */
+#else /* min ARCH = ARMv6 */
 
 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
 
-#endif	/* CONFIG_CPU_32v6K */
+#endif
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
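
All of these #ifdef rewrites encode one rule: the byte, halfword and doubleword exclusives (ldrexb/ldrexh/ldrexd) arrived with ARMv6K, so they are native only when CPU_V6 is excluded from the build; a kernel that must still run on plain V6 routes sizes 1 and 2 through __cmpxchg_local_generic and cmpxchg64 through the generic spinlock version. The guarded case-1 body is essentially (sketch, mirroring the __cmpxchg4 style visible above):

	/* Byte-wide cmpxchg via ldrexb/strexb -- compiled only when the
	 * build's minimum architecture is ARMv6K (CONFIG_CPU_V6 not set). */
	static inline unsigned long __cmpxchg1_sketch(volatile void *ptr,
						      unsigned long old,
						      unsigned long new)
	{
		unsigned long oldval, res;

		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"	/* store only on a match */
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);

		return oldval;
	}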
 
 

+ 5 - 6
arch/arm/include/asm/tls.h

@@ -28,15 +28,14 @@
 #define tls_emu		1
 #define has_tls_reg		1
 #define set_tls		set_tls_none
-#elif __LINUX_ARM_ARCH__ >= 7 ||					\
-	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
-#define tls_emu		0
-#define has_tls_reg		1
-#define set_tls		set_tls_v6k
-#elif __LINUX_ARM_ARCH__ == 6
+#elif defined(CONFIG_CPU_V6)
 #define tls_emu		0
 #define has_tls_reg		(elf_hwcap & HWCAP_TLS)
 #define set_tls		set_tls_v6
+#elif defined(CONFIG_CPU_32v6K)
+#define tls_emu		0
+#define has_tls_reg		1
+#define set_tls		set_tls_v6k
 #else
 #define tls_emu		0
 #define has_tls_reg		0
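
The reordered chain now keys directly on the CPU configs: a CPU_V6 build must test elf_hwcap at runtime, because early ARM1136 revisions lack the TLS register, whereas any build whose floor is V6K can assume it. The register in question is TPIDRURO, the user-read-only thread ID register, read like this (sketch with a hypothetical helper name):

	static inline unsigned long read_tpidruro(void)
	{
		unsigned long tp;

		/* TPIDRURO: user-read-only thread ID register, ARMv6K and later */
		asm("mrc p15, 0, %0, c13, c0, 3" : "=r" (tp));
		return tp;
	}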

+ 6 - 12
arch/arm/kernel/armksyms.c

@@ -140,24 +140,18 @@ EXPORT_SYMBOL(__aeabi_ulcmp);
 #endif
 
 	/* bitops */
-EXPORT_SYMBOL(_set_bit_le);
-EXPORT_SYMBOL(_test_and_set_bit_le);
-EXPORT_SYMBOL(_clear_bit_le);
-EXPORT_SYMBOL(_test_and_clear_bit_le);
-EXPORT_SYMBOL(_change_bit_le);
-EXPORT_SYMBOL(_test_and_change_bit_le);
+EXPORT_SYMBOL(_set_bit);
+EXPORT_SYMBOL(_test_and_set_bit);
+EXPORT_SYMBOL(_clear_bit);
+EXPORT_SYMBOL(_test_and_clear_bit);
+EXPORT_SYMBOL(_change_bit);
+EXPORT_SYMBOL(_test_and_change_bit);
 EXPORT_SYMBOL(_find_first_zero_bit_le);
 EXPORT_SYMBOL(_find_next_zero_bit_le);
 EXPORT_SYMBOL(_find_first_bit_le);
 EXPORT_SYMBOL(_find_next_bit_le);
 
 #ifdef __ARMEB__
-EXPORT_SYMBOL(_set_bit_be);
-EXPORT_SYMBOL(_test_and_set_bit_be);
-EXPORT_SYMBOL(_clear_bit_be);
-EXPORT_SYMBOL(_test_and_clear_bit_be);
-EXPORT_SYMBOL(_change_bit_be);
-EXPORT_SYMBOL(_test_and_change_bit_be);
 EXPORT_SYMBOL(_find_first_zero_bit_be);
 EXPORT_SYMBOL(_find_next_zero_bit_be);
 EXPORT_SYMBOL(_find_first_bit_be);

+ 1 - 1
arch/arm/kernel/debug.S

@@ -25,7 +25,7 @@
 		.macro	addruart, rp, rv
 		.endm
 
-#if defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 
 		.macro	senduart, rd, rx
 		mcr	p14, 0, \rd, c0, c5, 0

+ 7 - 7
arch/arm/kernel/entry-header.S

@@ -76,13 +76,13 @@
 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr
 	msr	spsr_cxsf, \rpsr
-#if defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
 	ldr	r0, [sp]
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
 	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
+#elif defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 #else
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 #endif
@@ -92,10 +92,10 @@
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
-#if defined(CONFIG_CPU_32v6K)
-	clrex					@ clear the exclusive monitor
-#elif defined (CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_V6)
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
 #endif
 	.if	\fast
 	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
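
Both hunks reorder the #if chain so the more restrictive CPU_V6 case is tested first, but the underlying trick is unchanged: plain ARMv6 has no clrex, so a dummy strex clears the local exclusive monitor, and because that strex may genuinely store, svc_exit reloads r0 before sacrificing its stack slot. The same logic as a standalone C helper (hypothetical name, for illustration only):

	static inline void clear_exclusive_monitor(void)
	{
	#if defined(CONFIG_CPU_V6)
		unsigned long tmp, scratch;

		/* No clrex on plain V6: a dummy strex clears the monitor as a
		 * side effect.  It may actually perform the store, so it must
		 * target a scratch location. */
		asm volatile("strex	%0, %1, [%2]"
			     : "=&r" (tmp)
			     : "r" (0), "r" (&scratch)
			     : "memory");
	#elif defined(CONFIG_CPU_32v6K)
		asm volatile("clrex" : : : "memory");
	#endif
	}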

+ 2 - 2
arch/arm/kernel/perf_event_v6.c

@@ -30,7 +30,7 @@
  * enable the interrupt.
  */
 
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 enum armv6_perf_types {
 	ARMV6_PERFCTR_ICACHE_MISS	    = 0x0,
 	ARMV6_PERFCTR_IBUF_STALL	    = 0x1,
@@ -669,4 +669,4 @@ static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
 {
 	return NULL;
 }
-#endif	/* CONFIG_CPU_V6 */
+#endif	/* CONFIG_CPU_V6 || CONFIG_CPU_V6K */

+ 30 - 20
arch/arm/lib/bitops.h

@@ -1,44 +1,52 @@
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_32v6K)
+#if __LINUX_ARM_ARCH__ >= 6
 	.macro	bitop, instr
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
-	and	r3, r0, #7		@ Get bit offset
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	\instr	r2, r2, r3
-	strexb	r0, r2, [r1]
+	strex	r0, r2, [r1]
 	cmp	r0, #0
 	bne	1b
-	mov	pc, lr
+	bx	lr
 	.endm
 
 	.macro	testop, instr, store
-	and	r3, r0, #7		@ Get bit offset
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1
-	add	r1, r1, r0, lsr #3	@ Get byte offset
+	and	r3, r0, #31		@ Get bit offset
+	mov	r0, r0, lsr #5
+	add	r1, r1, r0, lsl #2	@ Get word offset
 	mov	r3, r2, lsl r3		@ create mask
 	smp_dmb
-1:	ldrexb	r2, [r1]
+1:	ldrex	r2, [r1]
 	ands	r0, r2, r3		@ save old value of bit
-	\instr	r2, r2, r3			@ toggle bit
-	strexb	ip, r2, [r1]
+	\instr	r2, r2, r3		@ toggle bit
+	strex	ip, r2, [r1]
 	cmp	ip, #0
 	bne	1b
 	smp_dmb
 	cmp	r0, #0
 	movne	r0, #1
-2:	mov	pc, lr
+2:	bx	lr
 	.endm
 #else
 	.macro	bitop, instr
-	and	r2, r0, #7
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
+	and	r2, r0, #31
+	mov	r0, r0, lsr #5
 	mov	r3, #1
 	mov	r3, r3, lsl r2
 	save_and_disable_irqs ip
-	ldrb	r2, [r1, r0, lsr #3]
+	ldr	r2, [r1, r0, lsl #2]
 	\instr	r2, r2, r3
-	strb	r2, [r1, r0, lsr #3]
+	str	r2, [r1, r0, lsl #2]
 	restore_irqs ip
 	mov	pc, lr
 	.endm
@@ -52,11 +60,13 @@
  * to avoid dirtying the data cache.
  */
 	.macro	testop, instr, store
-	add	r1, r1, r0, lsr #3
-	and	r3, r0, #7
-	mov	r0, #1
+	ands	ip, r1, #3
+	strneb	r1, [ip]		@ assert word-aligned
+	and	r3, r0, #31
+	mov	r0, r0, lsr #5
 	save_and_disable_irqs ip
-	ldrb	r2, [r1]
+	ldr	r2, [r1, r0, lsl #2]!
+	mov	r0, #1
 	tst	r2, r0, lsl r3
 	\instr	r2, r2, r0, lsl r3
 	\store	r2, [r1]
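
The new two-instruction prologue doubles as a cheap alignment assertion: ands ip, r1, #3 isolates the low address bits, and the conditional strneb r1, [ip] executes only when they are non-zero, storing to an address in the range 1..3 and faulting immediately instead of letting a word-sized ldrex/strex silently touch the wrong bytes. In C terms, the callers' contract is now (sketch):

	/* The rewritten bitops operate on whole 32-bit words, so the base
	 * pointer must be unsigned-long aligned; a misaligned pointer now
	 * faults in the prologue rather than corrupting a neighbour. */
	static unsigned long bits[2];		/* naturally word-aligned */

	static void bitmap_example(void)
	{
		_set_bit(37, bits);		/* word nr/32, bit nr%32: bit 5 of bits[1] */
	}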

+ 2 - 8
arch/arm/lib/changebit.S

@@ -12,12 +12,6 @@
 #include "bitops.h"
                 .text
 
-/* Purpose  : Function to change a bit
- * Prototype: int change_bit(int bit, void *addr)
- */
-ENTRY(_change_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_change_bit_le)
+ENTRY(_change_bit)
 	bitop	eor
-ENDPROC(_change_bit_be)
-ENDPROC(_change_bit_le)
+ENDPROC(_change_bit)

+ 2 - 9
arch/arm/lib/clearbit.S

@@ -12,13 +12,6 @@
 #include "bitops.h"
                 .text
 
-/*
- * Purpose  : Function to clear a bit
- * Prototype: int clear_bit(int bit, void *addr)
- */
-ENTRY(_clear_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_clear_bit_le)
+ENTRY(_clear_bit)
 	bitop	bic
-ENDPROC(_clear_bit_be)
-ENDPROC(_clear_bit_le)
+ENDPROC(_clear_bit)

+ 2 - 9
arch/arm/lib/setbit.S

@@ -12,13 +12,6 @@
 #include "bitops.h"
 		.text
 
-/*
- * Purpose  : Function to set a bit
- * Prototype: int set_bit(int bit, void *addr)
- */
-ENTRY(_set_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_set_bit_le)
+ENTRY(_set_bit)
 	bitop	orr
-ENDPROC(_set_bit_be)
-ENDPROC(_set_bit_le)
+ENDPROC(_set_bit)

+ 3 - 6
arch/arm/lib/testchangebit.S

@@ -12,9 +12,6 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_test_and_change_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_change_bit_le)
-	testop	eor, strb
-ENDPROC(_test_and_change_bit_be)
-ENDPROC(_test_and_change_bit_le)
+ENTRY(_test_and_change_bit)
+	testop	eor, str
+ENDPROC(_test_and_change_bit)

+ 3 - 6
arch/arm/lib/testclearbit.S

@@ -12,9 +12,6 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_test_and_clear_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_clear_bit_le)
-	testop	bicne, strneb
-ENDPROC(_test_and_clear_bit_be)
-ENDPROC(_test_and_clear_bit_le)
+ENTRY(_test_and_clear_bit)
+	testop	bicne, strne
+ENDPROC(_test_and_clear_bit)

+ 3 - 6
arch/arm/lib/testsetbit.S

@@ -12,9 +12,6 @@
 #include "bitops.h"
                 .text
 
-ENTRY(_test_and_set_bit_be)
-		eor	r0, r0, #0x18		@ big endian byte ordering
-ENTRY(_test_and_set_bit_le)
-	testop	orreq, streqb
-ENDPROC(_test_and_set_bit_be)
-ENDPROC(_test_and_set_bit_le)
+ENTRY(_test_and_set_bit)
+	testop	orreq, streq
+ENDPROC(_test_and_set_bit)

+ 1 - 1
arch/arm/mach-dove/Kconfig

@@ -9,7 +9,7 @@ config MACH_DOVE_DB
 	  Say 'Y' here if you want your kernel to support the
 	  Marvell DB-MV88AP510 Development Board.
 
- config MACH_CM_A510
+config MACH_CM_A510
 	bool "CompuLab CM-A510 Board"
 	help
 	  Say 'Y' here if you want your kernel to support the

+ 3 - 2
arch/arm/mach-realview/Kconfig

@@ -19,7 +19,7 @@ config REALVIEW_EB_A9MP
 config REALVIEW_EB_ARM11MP
 	bool "Support ARM11MPCore Tile"
 	depends on MACH_REALVIEW_EB
-	select CPU_V6
+	select CPU_V6K
 	select ARCH_HAS_BARRIERS if SMP
 	help
 	  Enable support for the ARM11MPCore tile fitted to the Realview(R)
@@ -36,7 +36,7 @@ config REALVIEW_EB_ARM11MP_REVB
 
 config MACH_REALVIEW_PB11MP
 	bool "Support RealView(R) Platform Baseboard for ARM11MPCore"
-	select CPU_V6
+	select CPU_V6K
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
 	select ARCH_HAS_BARRIERS if SMP
@@ -45,6 +45,7 @@ config MACH_REALVIEW_PB11MP
 	  the ARM11MPCore.  This platform has an on-board ARM11MPCore and has
 	  support for PCI-E and Compact Flash.
 
+# ARMv6 CPU without K extensions, but does have the new exclusive ops
 config MACH_REALVIEW_PB1176
 	bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
 	select CPU_V6

+ 29 - 21
arch/arm/mm/Kconfig

@@ -390,7 +390,7 @@ config CPU_PJ4
 
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_V6
@@ -402,16 +402,18 @@ config CPU_V6
 	select CPU_TLB_V6 if MMU
 
 # ARMv6k
-config CPU_32v6K
-	bool "Support ARM V6K processor extensions" if !SMP
-	depends on CPU_V6 || CPU_V7
-	default y if SMP
-	help
-	  Say Y here if your ARMv6 processor supports the 'K' extension.
-	  This enables the kernel to use some instructions not present
-	  on previous processors, and as such a kernel build with this
-	  enabled will not boot on processors with do not support these
-	  instructions.
+config CPU_V6K
+	bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
+	select CPU_32v6
+	select CPU_32v6K
+	select CPU_ABRT_EV6
+	select CPU_PABRT_V6
+	select CPU_CACHE_V6
+	select CPU_CACHE_VIPT
+	select CPU_CP15_MMU
+	select CPU_HAS_ASID if MMU
+	select CPU_COPY_V6 if MMU
+	select CPU_TLB_V6 if MMU
 
 # ARMv7
 config CPU_V7
@@ -433,25 +435,33 @@ config CPU_32v3
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v4
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v4T
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v5
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v6
 	bool
 	select TLS_REG_EMUL if !CPU_32v6K && !MMU
+	select CPU_USE_DOMAINS if CPU_V6 && MMU
+
+config CPU_32v6K
+	bool
 
 config CPU_32v7
 	bool
@@ -607,8 +617,6 @@ config CPU_CP15_MPU
 
 config CPU_USE_DOMAINS
 	bool
-	depends on MMU
-	default y if !CPU_32v6K
 	help
 	  This option enables or disables the use of domain switching
 	  via the set_fs() function.
@@ -623,7 +631,7 @@ comment "Processor Features"
 
 config ARM_THUMB
 	bool "Support Thumb user binaries"
-	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V7 || CPU_FEROCEON
+	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
@@ -644,7 +652,7 @@ config ARM_THUMBEE
 
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
-	depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
+	depends on !CPU_USE_DOMAINS && CPU_V7
 	select HAVE_PROC_CPU if PROC_FS
 	default y if SMP
 	help
@@ -681,7 +689,7 @@ config CPU_BIG_ENDIAN
 config CPU_ENDIAN_BE8
 	bool
 	depends on CPU_BIG_ENDIAN
-	default CPU_V6 || CPU_V7
+	default CPU_V6 || CPU_V6K || CPU_V7
 	help
 	  Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
 
@@ -747,7 +755,7 @@ config CPU_CACHE_ROUND_ROBIN
 
 config CPU_BPREDICT_DISABLE
 	bool "Disable branch prediction"
-	depends on CPU_ARM1020 || CPU_V6 || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
+	depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
 	help
 	  Say Y here to disable branch prediction.  If unsure, say N.
 
@@ -767,7 +775,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
-	depends on CPU_V6 && SMP
+	depends on CPU_V6K && SMP
 	default y
 	help
 	  The Snoop Control Unit on ARM11MPCore does not detect the
@@ -823,7 +831,7 @@ config CACHE_L2X0
 config CACHE_PL310
 	bool
 	depends on CACHE_L2X0
-	default y if CPU_V7 && !CPU_V6
+	default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
 	help
 	  This option enables optimisations for the PL310 cache
 	  controller.
@@ -856,10 +864,10 @@ config ARM_L1_CACHE_SHIFT
 	default 5
 
 config ARM_DMA_MEM_BUFFERABLE
-	bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+	bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7
 	depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
 		     MACH_REALVIEW_PB11MP)
-	default y if CPU_V6 || CPU_V7
+	default y if CPU_V6 || CPU_V6K || CPU_V7
 	help
 	  Historically, the kernel has used strongly ordered mappings to
 	  provide DMA coherent memory.  With the advent of ARMv7, mapping

+ 1 - 0
arch/arm/mm/Makefile

@@ -90,6 +90,7 @@ obj-$(CONFIG_CPU_XSC3)		+= proc-xsc3.o
 obj-$(CONFIG_CPU_MOHAWK)	+= proc-mohawk.o
 obj-$(CONFIG_CPU_FEROCEON)	+= proc-feroceon.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
+obj-$(CONFIG_CPU_V6K)		+= proc-v6.o
 obj-$(CONFIG_CPU_V7)		+= proc-v7.o
 
 AFLAGS_proc-v6.o	:=-Wa,-march=armv6

+ 3 - 3
arch/arm/mm/abort-ev6.S

@@ -20,11 +20,11 @@
  */
 	.align	5
 ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
+#ifdef CONFIG_CPU_V6
 	sub	r1, sp, #4			@ Get unused stack location
 	strex	r0, r1, [r1]			@ Clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+	clrex
 #endif
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR

+ 1 - 1
arch/arm/mm/mmap.c

@@ -31,7 +31,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 	unsigned int cache_type;
 	int do_align = 0, aliasing = 0;
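
This last guard widens the same test once more: V6 and V6K parts can have aliasing VIPT data caches, where two mappings of one page land in different cache sets unless the virtual addresses agree modulo SHMLBA, so arch_get_unmapped_area() must colour-align shared mappings on these CPUs too. The alignment it applies boils down to (sketch of the existing COLOUR_ALIGN logic, with SHMLBA = 4 pages on ARM):

	#define SHMLBA	(4 * 4096)	/* 4 page colours on ARM */

	/* Round addr up to a colour boundary, then add the file offset's
	 * colour, so every mapping of a given page shares cache lines. */
	static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
	{
		unsigned long base = (addr + SHMLBA - 1) & ~(SHMLBA - 1);

		return base + ((pgoff << 12) & (SHMLBA - 1));
	}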