
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM fixes from Russell King:
 "It's been a while...  so there's a little more here than normal.

  Mostly updates from Will for the breakpoint stuff, and plugging a few
  holes in the user access functions which crept in when domain support
  was disabled for ARMv7 CPUs."

* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: 7529/1: delay: set loops_per_jiffy when moving to timer-based loop
  ARM: 7528/1: uaccess: annotate [__]{get,put}_user functions with might_fault()
  ARM: 7527/1: uaccess: explicitly check __user pointer when !CPU_USE_DOMAINS
  ARM: 7526/1: traps: send SIGILL if get_user fails on undef handling path
  ARM: 7521/1: Fix semihosting Kconfig text
  ARM: 7513/1: Make sure dtc is built before running it
  ARM: 7512/1: Fix XIP build due to PHYS_OFFSET definition moving
  ARM: 7499/1: mm: Fix vmalloc overlap check for !HIGHMEM
  ARM: 7503/1: mm: only flush both pmd entries for classic MMU
  ARM: 7502/1: contextidr: avoid using bfi instruction during notifier
  ARM: 7501/1: decompressor: reset ttbcr for VMSA ARMv7 cores
  ARM: 7497/1: hw_breakpoint: allow single-byte watchpoints on all addresses
  ARM: 7496/1: hw_breakpoint: don't rely on dfsr to show watchpoint access type
  ARM: Fix ioremap() of address zero
Linus Torvalds 12 years ago
commit 8507876aaa

+ 3 - 3
arch/arm/Kconfig.debug

@@ -356,15 +356,15 @@ choice
 		  is nothing connected to read from the DCC.
 
 	config DEBUG_SEMIHOSTING
-		bool "Kernel low-level debug output via semihosting I"
+		bool "Kernel low-level debug output via semihosting I/O"
 		help
 		  Semihosting enables code running on an ARM target to use
 		  the I/O facilities on a host debugger/emulator through a
-		  simple SVC calls. The host debugger or emulator must have
+		  simple SVC call. The host debugger or emulator must have
 		  semihosting enabled for the special svc call to be trapped
 		  otherwise the kernel will crash.
 
-		  This is known to work with OpenOCD, as wellas
+		  This is known to work with OpenOCD, as well as
 		  ARM's Fast Models, or any other controlling environment
 		  that implements semihosting.
 

+ 2 - 2
arch/arm/Makefile

@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb:
+%.dtb: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-dtbs:
+dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now

+ 4 - 0
arch/arm/boot/compressed/head.S

@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_CPU_ENDIAN_BE8
 		orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
+		mrcne   p15, 0, r6, c2, c0, 2   @ read ttb control reg
 		orrne	r0, r0, #1		@ MMU enabled
 		movne	r1, #0xfffffffd		@ domain 0 = client
+		bic     r6, r6, #1 << 31        @ 32-bit translation system
+		bic     r6, r6, #3 << 0         @ use only ttbr0
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
+		mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
 		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mcr	p15, 0, r0, c1, c0, 0	@ load control register

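In C terms, the added mrcne/bic/mcrne sequence amounts to the sketch below (illustration only; the helper name is ours). It forces TTBCR into a known state before the decompressor turns the MMU on, so stale LPAE-style settings left behind by firmware cannot break the identity mapping on VMSA ARMv7 cores.

    /* Sketch: reset the ARMv7 TTB control register before MMU enable. */
    static unsigned int sanitize_ttbcr(unsigned int ttbcr)
    {
        ttbcr &= ~(1u << 31);   /* 32-bit (short-descriptor) translation system */
        ttbcr &= ~(3u << 0);    /* N = 0: translate everything via TTBR0 */
        return ttbcr;
    }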
+ 8 - 0
arch/arm/include/asm/assembler.h

@@ -320,4 +320,12 @@
 	.size \name , . - \name
 	.endm
 
+	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+	adds	\tmp, \addr, #\size - 1
+	sbcccs	\tmp, \tmp, \limit
+	bcs	\bad
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */

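The two conditional instructions are dense; a C rendering of the same test (our illustration, with a hypothetical function name) is:

    /*
     * check_uaccess in C: an access of `size` bytes at `addr` is bad if
     * its last byte wraps past zero or lands above the limit, where the
     * limit passed in is addr_limit - 1.
     */
    static inline int uaccess_range_ok(unsigned long addr, unsigned long size,
                                       unsigned long limit)
    {
        unsigned long last = addr + size - 1;

        if (last < addr)        /* the adds carried: the range wrapped */
            return 0;
        return last <= limit;   /* the sbcccs + bcs pair encode this compare */
    }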
+ 3 - 0
arch/arm/include/asm/memory.h

@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */
 
 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.

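The effect of the guard shuffle is that PHYS_OFFSET is defined outside the #ifndef __ASSEMBLY__ region again, so assembly sources in the XIP build can see it. Abridged shape of the fixed header (our sketch):

    #ifndef __ASSEMBLY__
    /* ... C-only helpers such as __phys_to_virt() ... */
    #endif /* __ASSEMBLY__ */

    #ifndef PHYS_OFFSET                 /* visible to .S files again */
    #ifdef PLAT_PHYS_OFFSET
    #define PHYS_OFFSET  PLAT_PHYS_OFFSET
    #endif
    #endif

    #ifndef __ASSEMBLY__
    /* ... PFN macros and the rest of the C-only interface ... */
    #endif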
+ 4 - 0
arch/arm/include/asm/tlb.h

@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 
+#ifdef CONFIG_ARM_LPAE
+	tlb_add_flush(tlb, addr);
+#else
 	/*
 	 * With the classic ARM MMU, a pte page has two corresponding pmd
 	 * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	addr &= PMD_MASK;
 	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
 	tlb_add_flush(tlb, addr + SZ_1M);
+#endif
 
 	tlb_remove_page(tlb, pte);
 }

+ 43 - 15
arch/arm/include/asm/uaccess.h

@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
 
-#define __get_user_x(__r2,__p,__e,__s,__i...)				\
+#define __GUP_CLOBBER_1	"lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2	"ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4	"lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__get_user_" #__s				\
 		: "=&r" (__e), "=r" (__r2)				\
-		: "0" (__p)						\
-		: __i, "cc")
+		: "0" (__p), "r" (__l)					\
+		: __GUP_CLOBBER_##__s)
 
-#define get_user(x,p)							\
+#define __get_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
 		register unsigned long __r2 asm("r2");			\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__get_user_x(__r2, __p, __e, 1, "lr");		\
-	       		break;						\
+			__get_user_x(__r2, __p, __e, __l, 1);		\
+			break;						\
 		case 2:							\
-			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\
+			__get_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-	       		__get_user_x(__r2, __p, __e, 4, "lr");		\
+			__get_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
 		__e;							\
 	})
 
+#define get_user(x,p)							\
+	({								\
+		might_fault();						\
+		__get_user_check(x,p);					\
+	 })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__s)					\
+#define __put_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__put_user_" #__s				\
 		: "=&r" (__e)						\
-		: "0" (__p), "r" (__r2)					\
+		: "0" (__p), "r" (__r2), "r" (__l)			\
 		: "ip", "lr", "cc")
 
-#define put_user(x,p)							\
+#define __put_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__put_user_x(__r2, __p, __e, 1);		\
+			__put_user_x(__r2, __p, __e, __l, 1);		\
 			break;						\
 		case 2:							\
-			__put_user_x(__r2, __p, __e, 2);		\
+			__put_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__put_user_x(__r2, __p, __e, 4);		\
+			__put_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		case 8:							\
-			__put_user_x(__r2, __p, __e, 8);		\
+			__put_user_x(__r2, __p, __e, __l, 8);		\
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
 		__e;							\
 	})
 
+#define put_user(x,p)							\
+	({								\
+		might_fault();						\
+		__put_user_check(x,p);					\
+	 })
+
 #else /* CONFIG_MMU */
 
 /*
@@ -219,6 +245,7 @@ do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
@@ -300,6 +327,7 @@ do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
 	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\

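Callers are unaffected as long as they already check the return value; the minimal usage sketch below (ours, with a hypothetical helper name) now fails with -EFAULT for any pointer at or beyond addr_limit even on !CPU_USE_DOMAINS kernels, and may sleep because get_user() calls might_fault():

    #include <linux/uaccess.h>

    /* Hypothetical helper: fetch one u32 from user space. */
    static int fetch_word(u32 __user *uptr, u32 *out)
    {
        u32 val;

        if (get_user(val, uptr))    /* non-zero return means -EFAULT */
            return -EFAULT;
        *out = val;
        return 0;
    }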
+ 44 - 18
arch/arm/kernel/hw_breakpoint.c

@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
 		arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+	return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 		/* Aligned */
 		break;
 	case 1:
-		/* Allow single byte watchpoint. */
-		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-			break;
 	case 2:
 		/* Allow halfword watchpoints and breakpoints. */
 		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
 			break;
+	case 3:
+		/* Allow single byte watchpoint. */
+		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+			break;
 	default:
 		ret = -EINVAL;
 		goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	info->address &= ~alignment_mask;
 	info->ctrl.len <<= offset;
 
-	/*
-	 * Currently we rely on an overflow handler to take
-	 * care of single-stepping the breakpoint when it fires.
-	 * In the case of userspace breakpoints on a core with V7 debug,
-	 * we can use the mismatch feature as a poor-man's hardware
-	 * single-step, but this only works for per-task breakpoints.
-	 */
-	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-		pr_warning("overflow handler required but none found\n");
-		ret = -EINVAL;
+	if (!bp->overflow_handler) {
+		/*
+		 * Mismatch breakpoints are required for single-stepping
+		 * breakpoints.
+		 */
+		if (!core_has_mismatch_brps())
+			return -EINVAL;
+
+		/* We don't allow mismatch breakpoints in kernel space. */
+		if (arch_check_bp_in_kernelspace(bp))
+			return -EPERM;
+
+		/*
+		 * Per-cpu breakpoints are not supported by our stepping
+		 * mechanism.
+		 */
+		if (!bp->hw.bp_target)
+			return -EINVAL;
+
+		/*
+		 * We only support specific access types if the fsr
+		 * reports them.
+		 */
+		if (!debug_exception_updates_fsr() &&
+		    (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+		     info->ctrl.type == ARM_BREAKPOINT_STORE))
+			return -EINVAL;
 	}
+
 out:
 	return ret;
 }
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 				goto unlock;
 
 			/* Check that the access type matches. */
-			access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-				 HW_BREAKPOINT_R;
-			if (!(access & hw_breakpoint_type(wp)))
-				goto unlock;
+			if (debug_exception_updates_fsr()) {
+				access = (fsr & ARM_FSR_ACCESS_MASK) ?
+					  HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+				if (!(access & hw_breakpoint_type(wp)))
+					goto unlock;
+			}
 
 			/* We have a winner. */
 			info->trigger = addr;

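Restating the reordered switch in C with the fallthroughs made explicit (our summary, not the kernel function): for a watchpoint at byte offset off = addr & 3, the accepted lengths are now:

    #include <stdbool.h>

    /* off = addr & 3, len in bytes; sketch only. */
    static bool wp_offset_len_ok(unsigned int off, unsigned int len)
    {
        if (off == 0)
            return true;        /* aligned: any supported length */
        if ((off == 1 || off == 2) && len == 2)
            return true;        /* halfword */
        if (len == 1)
            return true;        /* single byte, at any address */
        return false;
    }

And because debug_exception_updates_fsr() returns 0 here, the DFSR is never trusted for the access type: load-only or store-only watchpoints without an overflow handler are rejected at validation time, and the watchpoint handler skips the access-type match.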
+ 7 - 4
arch/arm/kernel/traps.c

@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
 			instr = *(u32 *) pc;
 	} else if (thumb_mode(regs)) {
-		get_user(instr, (u16 __user *)pc);
+		if (get_user(instr, (u16 __user *)pc))
+			goto die_sig;
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
-			get_user(instr2, (u16 __user *)pc+1);
+			if (get_user(instr2, (u16 __user *)pc+1))
+				goto die_sig;
 			instr <<= 16;
 			instr |= instr2;
 		}
-	} else {
-		get_user(instr, (u32 __user *)pc);
+	} else if (get_user(instr, (u32 __user *)pc)) {
+		goto die_sig;
 	}
 
 	if (call_undef_hook(regs, instr) == 0)
 		return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",

+ 1 - 0
arch/arm/lib/delay.c

@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
 {
 	pr_info("Switching to timer-based delay loop\n");
 	lpj_fine			= freq / HZ;
+	loops_per_jiffy			= lpj_fine;
 	arm_delay_ops.delay		= __timer_delay;
 	arm_delay_ops.const_udelay	= __timer_const_udelay;
 	arm_delay_ops.udelay		= __timer_udelay;

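The one-liner matters because generic code reads loops_per_jiffy directly, not lpj_fine; without the assignment it would keep using the value calibrated for the old busy-wait loop. A worked example (ours):

    /* Illustration: with a 24 MHz architected timer and HZ = 100. */
    static unsigned long timer_lpj(unsigned long freq, unsigned long hz)
    {
        return freq / hz;   /* 24000000 / 100 = 240000 ticks per jiffy */
    }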
+ 15 - 8
arch/arm/lib/getuser.S

@@ -16,8 +16,9 @@
  * __get_user_X
  *
  * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
  * Outputs:	r0 is the error code
- *		r2, r3 contains the zero-extended value
+ *		r2 contains the zero-extended value
  *		lr corrupted
  *
  * No other registers must be altered.  (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
  * Note also that it is intended that __get_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
+	check_uaccess r0, 1, r1, r2, __get_user_bad
 1: TUSER(ldrb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb)	r2, [r0]
-3: TUSER(ldrb)	r3, [r0, #1]
+	check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+2:	ldrbt	r2, [r0], #1
+3:	ldrbt	rb, [r0], #0
 #else
-2: TUSER(ldrb)	r2, [r0], #1
-3: TUSER(ldrb)	r3, [r0]
+rb	.req	r0
+2:	ldrb	r2, [r0]
+3:	ldrb	rb, [r0, #1]
 #endif
 #ifndef __ARMEB__
-	orr	r2, r2, r3, lsl #8
+	orr	r2, r2, rb, lsl #8
 #else
-	orr	r2, r3, r2, lsl #8
+	orr	r2, rb, r2, lsl #8
 #endif
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
+	check_uaccess r0, 4, r1, r2, __get_user_bad
 4: TUSER(ldr)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr

+ 6 - 0
arch/arm/lib/putuser.S

@@ -16,6 +16,7 @@
  * __put_user_X
  *
  * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
  *		r2, r3 contains the value
  * Outputs:	r0 is the error code
  *		lr corrupted
@@ -27,16 +28,19 @@
  * Note also that it is intended that __put_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
+	check_uaccess r0, 1, r1, ip, __put_user_bad
 1: TUSER(strb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
+	check_uaccess r0, 2, r1, ip, __put_user_bad
 	mov	ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
+	check_uaccess r0, 4, r1, ip, __put_user_bad
 4: TUSER(str)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
+	check_uaccess r0, 8, r1, ip, __put_user_bad
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(str)	r2, [r0]
 6: TUSER(str)	r3, [r0, #4]

+ 4 - 3
arch/arm/mm/context.c

@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
 	pid = task_pid_nr(thread->task) << ASID_BITS;
 	asm volatile(
 	"	mrc	p15, 0, %0, c13, c0, 1\n"
-	"	bfi	%1, %0, #0, %2\n"
-	"	mcr	p15, 0, %1, c13, c0, 1\n"
+	"	and	%0, %0, %2\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c13, c0, 1\n"
 	: "=r" (contextidr), "+r" (pid)
-	: "I" (ASID_BITS));
+	: "I" (~ASID_MASK));
 	isb();
 
 	return NOTIFY_OK;

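The replacement is a plain read-modify-write; in C (our rendering, with illustrative constants standing in for the assumed 8-bit ASID layout) the notifier now computes:

    #define EX_ASID_BITS  8                        /* assumed field width */
    #define EX_ASID_MASK  (~0u << EX_ASID_BITS)    /* everything above the ASID */

    /* Keep the hardware ASID bits, install the shifted PID above them. */
    static unsigned int contextidr_set_procid(unsigned int contextidr,
                                              unsigned int pid_shifted)
    {
        return (contextidr & ~EX_ASID_MASK) | pid_shifted;
    }

The and/orr pair avoids the bfi instruction, which is not available on all CPU targets this file can be built for, at the cost of one extra instruction.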
+ 3 - 0
arch/arm/mm/mm.h

@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* permanent static mappings from iotable_init() */
 #define VM_ARM_STATIC_MAPPING	0x40000000
 
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING	0x20000000
+
 /* mapping type (attributes) for permanent static mappings */
 #define VM_ARM_MTYPE(mt)		((mt) << 20)
 #define VM_ARM_MTYPE_MASK	(0x1f << 20)

+ 4 - 4
arch/arm/mm/mmu.c

@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
 	vm->addr = (void *)addr;
 	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = pmd_empty_section_gap;
 	vm_area_add_early(vm);
 }
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
 
 	/* we're still single threaded hence no lock needed here */
 	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
 			continue;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would partially overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start + bank->size) > vmalloc_min ||
-		    __va(bank->start + bank->size) < __va(bank->start)) {
+		if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+		    __va(bank->start + bank->size - 1) <= __va(bank->start)) {
 			unsigned long newsize = vmalloc_min - __va(bank->start);
 			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
 			       "to -%.8llx (vmalloc region overlap).\n",