[ARM] 5385/2: unwind: Add unwinding information to exception entry points

This is needed to allow or stop the unwinding at certain points in the
kernel, such as exception entries.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Catalin Marinas, 16 years ago
parent commit c4c5716e16
2 changed files with 23 additions and 0 deletions
  1. arch/arm/kernel/entry-armv.S (+19, -0)
  2. arch/arm/kernel/entry-common.S (+4, -0)
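The UNWIND() wrapper used throughout the diff below exists so that the ARM EHABI annotations cost nothing when the unwinder is not configured. A minimal sketch of how <asm/unwind.h> defines it, assuming the conventional CONFIG_ARM_UNWIND guard:

	/* Emit the unwind directive only when the ARM stack unwinder is
	 * built in; otherwise the annotation disappears entirely, so the
	 * assembly sources build unchanged on other configurations. */
	#ifdef CONFIG_ARM_UNWIND
	#define UNWIND(code...)		code
	#else
	#define UNWIND(code...)
	#endif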

+ 19 - 0
arch/arm/kernel/entry-armv.S

@@ -20,6 +20,7 @@
 #include <asm/vfpmacros.h>
 #include <mach/entry-macro.S>
 #include <asm/thread_notify.h>
+#include <asm/unwind.h>
 
 #include "entry-header.S"
 
@@ -123,6 +124,8 @@ ENDPROC(__und_invalid)
 #endif
 
 	.macro	svc_entry, stack_hole=0
+ UNWIND(.fnstart		)
+ UNWIND(.save {r0 - pc}		)
 	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole)
 SPFIX(	tst	sp, #4		)
 SPFIX(	bicne	sp, sp, #4	)
@@ -196,6 +199,7 @@ __dabt_svc:
 	ldr	r0, [sp, #S_PSR]
 	msr	spsr_cxsf, r0
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+ UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
 
 	.align	5
@@ -228,6 +232,7 @@ __irq_svc:
 	bleq	trace_hardirqs_on
 #endif
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+ UNWIND(.fnend		)
 ENDPROC(__irq_svc)
 
 	.ltorg
@@ -278,6 +283,7 @@ __und_svc:
 	ldr	lr, [sp, #S_PSR]		@ Get SVC cpsr
 	msr	spsr_cxsf, lr
 	ldmia	sp, {r0 - pc}^			@ Restore SVC registers
+ UNWIND(.fnend		)
 ENDPROC(__und_svc)
 
 	.align	5
@@ -320,6 +326,7 @@ __pabt_svc:
 	ldr	r0, [sp, #S_PSR]
 	msr	spsr_cxsf, r0
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+ UNWIND(.fnend		)
 ENDPROC(__pabt_svc)
 
 	.align	5
@@ -343,6 +350,8 @@ ENDPROC(__pabt_svc)
 #endif
 
 	.macro	usr_entry
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
 	stmib	sp, {r1 - r12}
 
@@ -420,6 +429,7 @@ __dabt_usr:
 	mov	r2, sp
 	adr	lr, ret_from_exception
 	b	do_DataAbort
+ UNWIND(.fnend		)
 ENDPROC(__dabt_usr)
 
 	.align	5
@@ -450,6 +460,7 @@ __irq_usr:
 
 	mov	why, #0
 	b	ret_to_user
+ UNWIND(.fnend		)
 ENDPROC(__irq_usr)
 
 	.ltorg
@@ -484,6 +495,7 @@ __und_usr:
 #else
 	b	__und_usr_unknown
 #endif
+ UNWIND(.fnend		)
 ENDPROC(__und_usr)
 
 	@
@@ -671,14 +683,18 @@ __pabt_usr:
 	enable_irq				@ Enable interrupts
 	mov	r1, sp				@ regs
 	bl	do_PrefetchAbort		@ call abort handler
+ UNWIND(.fnend		)
 	/* fall through */
 /*
  * This is the return code to user mode for abort handlers
  */
 ENTRY(ret_from_exception)
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
 	get_thread_info tsk
 	mov	why, #0
 	b	ret_to_user
+ UNWIND(.fnend		)
 ENDPROC(__pabt_usr)
 ENDPROC(ret_from_exception)
 
@@ -688,6 +704,8 @@ ENDPROC(ret_from_exception)
  * previous and next are guaranteed not to be the same.
  */
 ENTRY(__switch_to)
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
 	add	ip, r1, #TI_CPU_SAVE
 	ldr	r3, [r2, #TI_TP_VALUE]
 	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
@@ -717,6 +735,7 @@ ENTRY(__switch_to)
 	bl	atomic_notifier_call_chain
 	mov	r0, r5
 	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+ UNWIND(.fnend		)
 ENDPROC(__switch_to)
 
 	__INIT

+ 4 - 0
arch/arm/kernel/entry-common.S

@@ -11,6 +11,7 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <mach/entry-macro.S>
+#include <asm/unwind.h>
 
 #include "entry-header.S"
 
@@ -22,6 +23,8 @@
  * stack.
  */
 ret_fast_syscall:
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
 	disable_irq				@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
@@ -38,6 +41,7 @@ ret_fast_syscall:
 	mov	r0, r0
 	add	sp, sp, #S_FRAME_SIZE - S_PC
 	movs	pc, lr				@ return & move spsr_svc into cpsr
+ UNWIND(.fnend		)
 
 /*
  * Ok, we need to do extra processing, enter the slow path.
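Taken together, the annotations follow one pattern: .fnstart and .fnend bracket each region the unwinder may describe, .save records the registers the prologue pushed so a backtrace can restore them, and .cantunwind marks frames where unwinding must stop (entries from user space, __switch_to, and the fast syscall return, where there is no kernel caller to unwind into). A hypothetical routine, not part of this patch, annotated with the same directives for illustration:

	@ example_func is illustrative only; ENTRY/ENDPROC come from
	@ <linux/linkage.h> and the directives are standard ARM EHABI ones.
	ENTRY(example_func)
	 UNWIND(.fnstart	)
	 UNWIND(.save	{r4, r5, lr}	)	@ registers the prologue saves
		stmfd	sp!, {r4, r5, lr}
		@ ... function body ...
		ldmfd	sp!, {r4, r5, pc}	@ restore and return
	 UNWIND(.fnend		)
	ENDPROC(example_func)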