@@ -5,6 +5,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/errno.h>
 #include <asm/thread_info.h>
+#include <asm/v7m.h>
 
 @ Bad Abort numbers
 @ -----------------
@@ -44,6 +45,116 @@
 #endif
 	.endm
 
+#ifdef CONFIG_CPU_V7M
+/*
+ * ARMv7-M exception entry/exit macros.
+ *
+ * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
+ * automatically saved on the current stack (32 bytes) before
+ * switching to the exception stack (SP_main).
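+ * (In memory order, lowest address first, that frame is r0, r1, r2,
+ * r3, r12, lr, ReturnAddress() and xPSR, optionally followed by one
+ * padding word used for 8-byte stack alignment.)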
+ *
+ * If an exception is taken while in user mode, SP_main is
+ * empty. Otherwise, SP_main is aligned to 64 bits automatically
+ * (CCR.STKALIGN set).
+ *
+ * Linux assumes that interrupts are disabled when entering an
+ * exception handler and it may BUG if this is not the case. Interrupts
+ * are disabled during entry and re-enabled in the exit macro.
+ *
+ * v7m_exception_slow_exit is used when returning from SVC or PendSV.
+ * When returning to kernel mode, we don't return from exception.
+ */
+	.macro	v7m_exception_entry
+	@ determine the location of the registers saved by the core during
+	@ exception entry. Depending on the mode the CPU was in when the
+	@ exception happened, that is either on the main or the process stack.
+	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
+	@ was used.
+	tst	lr, #EXC_RET_STACK_MASK
+	mrsne	r12, psp
+	moveq	r12, sp
+
+	@ we cannot rely on r0-r3 and r12 matching the value saved in the
+	@ exception frame because of tail-chaining. So these have to be
+	@ reloaded.
+	ldmia	r12!, {r0-r3}
+
+	@ Linux expects to have irqs off. Do it here before taking stack space
+	cpsid	i
+
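+	@ reserve the upper part of the pt_regs frame (r12 through old_r0) on
+	@ the main stack, then push r0-r11 below it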
+	sub	sp, #S_FRAME_SIZE-S_IP
+	stmdb	sp!, {r0-r11}
+
+	@ load saved r12, lr, return address and xPSR.
+	@ r0-r7 are used for signals and never touched from now on. Clobbering
+	@ r8-r12 is OK.
+	mov	r9, r12
+	ldmia	r9!, {r8, r10-r12}
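+	@ (now r8 = saved r12, r10 = saved lr, r11 = return address,
+	@  r12 = saved xPSR)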
+
+	@ calculate the original stack pointer value.
+	@ r9 currently points to the memory location just above the auto saved
+	@ xPSR.
+	@ The CPU might automatically 8-byte align the stack. Bit 9 of the
+	@ saved xPSR indicates whether stack alignment took place. In that case
+	@ an extra 32-bit padding word is included in the frame.
+
+	tst	r12, V7M_xPSR_FRAMEPTRALIGN
+	addne	r9, r9, #4
+
+	@ store saved r12 using str to have a register to hold the base for stm
+	str	r8, [sp, #S_IP]
+	add	r8, sp, #S_SP
+	@ store r13-r15, xPSR
+	stmia	r8!, {r9-r12}
+	@ store old_r0
+	str	r0, [r8]
+	.endm
+
+	/*
+	 * PENDSV and SVCALL are configured to have the same exception
+	 * priorities. Since a kernel thread runs at SVCALL execution priority,
+	 * it can never be preempted and so we will never have to return to a
+	 * kernel thread here.
+	 */
+	.macro	v7m_exception_slow_exit ret_r0
+	cpsid	i
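+	@ EXC_RETURN value: exception return to Thread mode, with the frame
+	@ restored from the process stack (PSP)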
+	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK
+
+	@ read original r12, sp, lr, pc and xPSR
+	add	r12, sp, #S_IP
+	ldmia	r12, {r1-r5}
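+	@ (r1 = saved r12, r2 = saved sp, r3 = saved lr, r4 = saved pc,
+	@  r5 = saved xPSR)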
+
+	@ an exception frame is always 8-byte aligned. To tell the hardware
+	@ whether the sp to be restored is aligned or not, set bit 9 of the
+	@ saved xPSR accordingly.
+	tst	r2, #4
+	subne	r2, r2, #4
+	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
+	biceq	r5, V7M_xPSR_FRAMEPTRALIGN
+
+	@ write basic exception frame
+	stmdb	r2!, {r1, r3-r5}
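+	@ reload the saved r0-r3 and write them to the frame; if \ret_r0 is
+	@ set, the current r0 (return value) replaces the saved r0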
+	ldmia	sp, {r1, r3-r5}
+	.if	\ret_r0
+	stmdb	r2!, {r0, r3-r5}
+	.else
+	stmdb	r2!, {r1, r3-r5}
+	.endif
+
+	@ restore process sp
+	msr	psp, r2
+
+	@ restore original r4-r11
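+	@ (r0-r3 are popped as well, but the hardware overwrites them when it
+	@  restores the exception frame on return)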
+	ldmia	sp!, {r0-r11}
+
+	@ restore main sp
+	add	sp, sp, #S_FRAME_SIZE-S_IP
+
+	cpsie	i
+	bx	lr
+	.endm
+#endif	/* CONFIG_CPU_V7M */
+
 @
 @ Store/load the USER SP and LR registers by switching to the SYS
 @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
@@ -131,6 +242,18 @@
 	rfeia	sp!
 	.endm
 
+#ifdef CONFIG_CPU_V7M
+	/*
+	 * Note we don't need to do clrex here as clearing the local monitor is
+	 * part of each exception entry and exit sequence.
+	 */
+	.macro	restore_user_regs, fast = 0, offset = 0
+	.if	\offset
+	add	sp, #\offset
+	.endif
+	v7m_exception_slow_exit ret_r0 = \fast
+	.endm
+#else	/* ifdef CONFIG_CPU_V7M */
 	.macro	restore_user_regs, fast = 0, offset = 0
 	clrex					@ clear the exclusive monitor
 	mov	r2, sp
@@ -147,6 +270,7 @@
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
 	.endm
+#endif	/* ifdef CONFIG_CPU_V7M / else */
 
 	.macro	get_thread_info, rd
 	mov	\rd, sp