/* entry.S */
  1. /* -*- mode: asm -*-
  2. *
  3. * linux/arch/h8300/platform/h8300h/entry.S
  4. *
  5. * Yoshinori Sato <ysato@users.sourceforge.jp>
  6. * David McCullough <davidm@snapgear.com>
  7. *
  8. */
  9. /*
  10. * entry.S
  11. * include exception/interrupt gateway
  12. * system call entry
  13. */
  14. #include <linux/sys.h>
  15. #include <asm/unistd.h>
  16. #include <asm/setup.h>
  17. #include <asm/segment.h>
  18. #include <asm/linkage.h>
  19. #include <asm/asm-offsets.h>
  20. #include <asm/thread_info.h>
  21. #include <asm/errno.h>
  22. #if defined(CONFIG_CPU_H8300H)
  23. #define USERRET 8
  24. INTERRUPTS = 64
  25. .h8300h
  26. .macro SHLL2 reg
  27. shll.l \reg
  28. shll.l \reg
  29. .endm
  30. .macro SHLR2 reg
  31. shlr.l \reg
  32. shlr.l \reg
  33. .endm
  34. .macro SAVEREGS
  35. mov.l er0,@-sp
  36. mov.l er1,@-sp
  37. mov.l er2,@-sp
  38. mov.l er3,@-sp
  39. .endm
  40. .macro RESTOREREGS
  41. mov.l @sp+,er3
  42. mov.l @sp+,er2
  43. .endm
  44. .macro SAVEEXR
  45. .endm
  46. .macro RESTOREEXR
  47. .endm
  48. #endif
  49. #if defined(CONFIG_CPU_H8S)
  50. #define USERRET 10
  51. #define USEREXR 8
  52. INTERRUPTS = 128
  53. .h8300s
  54. .macro SHLL2 reg
  55. shll.l #2,\reg
  56. .endm
  57. .macro SHLR2 reg
  58. shlr.l #2,\reg
  59. .endm
  60. .macro SAVEREGS
  61. stm.l er0-er3,@-sp
  62. .endm
  63. .macro RESTOREREGS
  64. ldm.l @sp+,er2-er3
  65. .endm
  66. .macro SAVEEXR
  67. mov.w @(USEREXR:16,er0),r1
  68. mov.w r1,@(LEXR-LER3:16,sp) /* copy EXR */
  69. .endm
  70. .macro RESTOREEXR
  71. mov.w @(LEXR-LER1:16,sp),r1 /* restore EXR */
  72. mov.b r1l,r1h
  73. mov.w r1,@(USEREXR:16,er0)
  74. .endm
  75. #endif
  76. /* CPU context save/restore macros. */
  77. .macro SAVE_ALL
  78. mov.l er0,@-sp
  79. stc ccr,r0l /* check kernel mode */
  80. btst #4,r0l
  81. bne 5f
  82. /* user mode */
  83. mov.l sp,@SYMBOL_NAME(sw_usp)
  84. mov.l @sp,er0 /* restore saved er0 */
  85. orc #0x10,ccr /* switch kernel stack */
  86. mov.l @SYMBOL_NAME(sw_ksp),sp
  87. sub.l #(LRET-LORIG),sp /* allocate LORIG - LRET */
  88. SAVEREGS
  89. mov.l @SYMBOL_NAME(sw_usp),er0
  90. mov.l @(USERRET:16,er0),er1 /* copy the RET addr */
  91. mov.l er1,@(LRET-LER3:16,sp)
  92. SAVEEXR
  93. mov.l @(LORIG-LER3:16,sp),er0
  94. mov.l er0,@(LER0-LER3:16,sp) /* copy ER0 */
  95. mov.w e1,r1 /* e1 highbyte = ccr */
  96. and #0xef,r1h /* mask mode? flag */
  97. bra 6f
  98. 5:
  99. /* kernel mode */
  100. mov.l @sp,er0 /* restore saved er0 */
  101. subs #2,sp /* set dummy ccr */
  102. SAVEREGS
  103. mov.w @(LRET-LER3:16,sp),r1 /* copy old ccr */
  104. 6:
  105. mov.b r1h,r1l
  106. mov.b #0,r1h
  107. mov.w r1,@(LCCR-LER3:16,sp) /* set ccr */
  108. mov.l er6,@-sp /* syscall arg #6 */
  109. mov.l er5,@-sp /* syscall arg #5 */
  110. mov.l er4,@-sp /* syscall arg #4 */
  111. .endm /* r1 = ccr */
  112. .macro RESTORE_ALL
  113. mov.l @sp+,er4
  114. mov.l @sp+,er5
  115. mov.l @sp+,er6
  116. RESTOREREGS
  117. mov.w @(LCCR-LER1:16,sp),r0 /* check kernel mode */
  118. btst #4,r0l
  119. bne 7f
  120. orc #0x80,ccr
  121. mov.l @SYMBOL_NAME(sw_usp),er0
  122. mov.l @(LER0-LER1:16,sp),er1 /* restore ER0 */
  123. mov.l er1,@er0
  124. RESTOREEXR
  125. mov.w @(LCCR-LER1:16,sp),r1 /* restore the RET addr */
  126. mov.b r1l,r1h
  127. mov.b @(LRET+1-LER1:16,sp),r1l
  128. mov.w r1,e1
  129. mov.w @(LRET+2-LER1:16,sp),r1
  130. mov.l er1,@(USERRET:16,er0)
  131. mov.l @sp+,er1
  132. add.l #(LRET-LER1),sp /* remove LORIG - LRET */
  133. mov.l sp,@SYMBOL_NAME(sw_ksp)
  134. andc #0xef,ccr /* switch to user mode */
  135. mov.l er0,sp
  136. bra 8f
  137. 7:
  138. mov.l @sp+,er1
  139. adds #4,sp
  140. adds #2,sp
  141. 8:
  142. mov.l @sp+,er0
  143. adds #4,sp /* remove the sw created LVEC */
  144. rte
  145. .endm
  146. .globl SYMBOL_NAME(system_call)
  147. .globl SYMBOL_NAME(ret_from_exception)
  148. .globl SYMBOL_NAME(ret_from_fork)
  149. .globl SYMBOL_NAME(ret_from_kernel_thread)
  150. .globl SYMBOL_NAME(ret_from_interrupt)
  151. .globl SYMBOL_NAME(interrupt_redirect_table)
  152. .globl SYMBOL_NAME(sw_ksp),SYMBOL_NAME(sw_usp)
  153. .globl SYMBOL_NAME(resume)
  154. .globl SYMBOL_NAME(interrupt_entry)
  155. .globl SYMBOL_NAME(trace_break)
  156. #if defined(CONFIG_ROMKERNEL)
  157. .section .int_redirect,"ax"
  158. SYMBOL_NAME_LABEL(interrupt_redirect_table)
  159. #if defined(CONFIG_CPU_H8300H)
  160. .rept 7
  161. .long 0
  162. .endr
  163. #endif
  164. #if defined(CONFIG_CPU_H8S)
  165. .rept 5
  166. .long 0
  167. .endr
  168. jmp @SYMBOL_NAME(trace_break)
  169. .long 0
  170. #endif
  171. jsr @SYMBOL_NAME(interrupt_entry) /* NMI */
  172. jmp @SYMBOL_NAME(system_call) /* TRAPA #0 (System call) */
  173. .long 0
  174. .long 0
  175. jmp @SYMBOL_NAME(trace_break) /* TRAPA #3 (breakpoint) */
  176. .rept INTERRUPTS-12
  177. jsr @SYMBOL_NAME(interrupt_entry)
  178. .endr
  179. #endif
  180. #if defined(CONFIG_RAMKERNEL)
  181. .globl SYMBOL_NAME(interrupt_redirect_table)
  182. .section .bss
  183. SYMBOL_NAME_LABEL(interrupt_redirect_table)
  184. .space 4
  185. #endif
  186. .section .text
  187. .align 2
  188. SYMBOL_NAME_LABEL(interrupt_entry)
  189. SAVE_ALL
  190. mov.l sp,er0
  191. add.l #LVEC,er0
  192. btst #4,r1l
  193. bne 1f
  194. /* user LVEC */
  195. mov.l @SYMBOL_NAME(sw_usp),er0
  196. adds #4,er0
  197. 1:
  198. mov.l @er0,er0 /* LVEC address */
  199. #if defined(CONFIG_ROMKERNEL)
  200. sub.l #SYMBOL_NAME(interrupt_redirect_table),er0
  201. #endif
  202. #if defined(CONFIG_RAMKERNEL)
  203. mov.l @SYMBOL_NAME(interrupt_redirect_table),er1
  204. sub.l er1,er0
  205. #endif
  206. SHLR2 er0
  207. dec.l #1,er0
  208. mov.l sp,er1
  209. subs #4,er1 /* adjust ret_pc */
  210. jsr @SYMBOL_NAME(do_IRQ)
  211. jmp @SYMBOL_NAME(ret_from_interrupt)
  212. SYMBOL_NAME_LABEL(system_call)
  213. subs #4,sp /* dummy LVEC */
  214. SAVE_ALL
  215. andc #0x7f,ccr
  216. mov.l er0,er4
  217. /* save top of frame */
  218. mov.l sp,er0
  219. jsr @SYMBOL_NAME(set_esp0)
  220. mov.l sp,er2
  221. and.w #0xe000,r2
  222. mov.b @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
  223. btst #(TIF_SYSCALL_TRACE & 7),r2l
  224. beq 1f
  225. jsr @SYMBOL_NAME(do_syscall_trace)
  226. 1:
  227. cmp.l #NR_syscalls,er4
  228. bcc badsys
  229. SHLL2 er4
  230. mov.l #SYMBOL_NAME(sys_call_table),er0
  231. add.l er4,er0
  232. mov.l @er0,er4
  233. beq SYMBOL_NAME(ret_from_exception):16
  234. mov.l @(LER1:16,sp),er0
  235. mov.l @(LER2:16,sp),er1
  236. mov.l @(LER3:16,sp),er2
  237. jsr @er4
  238. mov.l er0,@(LER0:16,sp) /* save the return value */
  239. mov.l sp,er2
  240. and.w #0xe000,r2
  241. mov.b @((TI_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
  242. btst #(TIF_SYSCALL_TRACE & 7),r2l
  243. beq 2f
  244. jsr @SYMBOL_NAME(do_syscall_trace)
  245. 2:
  246. #if defined(CONFIG_SYSCALL_PRINT)
  247. jsr @SYMBOL_NAME(syscall_print)
  248. #endif
  249. orc #0x80,ccr
  250. bra resume_userspace
  251. badsys:
  252. mov.l #-ENOSYS,er0
  253. mov.l er0,@(LER0:16,sp)
  254. bra resume_userspace
  255. #if !defined(CONFIG_PREEMPT)
  256. #define resume_kernel restore_all
  257. #endif
  258. SYMBOL_NAME_LABEL(ret_from_exception)
  259. #if defined(CONFIG_PREEMPT)
  260. orc #0x80,ccr
  261. #endif
  262. SYMBOL_NAME_LABEL(ret_from_interrupt)
  263. mov.b @(LCCR+1:16,sp),r0l
  264. btst #4,r0l
  265. bne resume_kernel:8 /* return from kernel */
  266. resume_userspace:
  267. andc #0x7f,ccr
  268. mov.l sp,er4
  269. and.w #0xe000,r4 /* er4 <- current thread info */
  270. mov.l @(TI_FLAGS:16,er4),er1
  271. and.l #_TIF_WORK_MASK,er1
  272. beq restore_all:8
  273. work_pending:
  274. btst #TIF_NEED_RESCHED,r1l
  275. bne work_resched:8
  276. /* work notifysig */
  277. mov.l sp,er0
  278. subs #4,er0 /* er0: pt_regs */
  279. jsr @SYMBOL_NAME(do_notify_resume)
  280. bra restore_all:8
  281. work_resched:
  282. mov.l sp,er0
  283. jsr @SYMBOL_NAME(set_esp0)
  284. jsr @SYMBOL_NAME(schedule)
  285. bra resume_userspace:8
  286. restore_all:
  287. RESTORE_ALL /* Does RTE */
  288. #if defined(CONFIG_PREEMPT)
  289. resume_kernel:
  290. mov.l @(TI_PRE_COUNT:16,er4),er0
  291. bne restore_all:8
  292. need_resched:
  293. mov.l @(TI_FLAGS:16,er4),er0
  294. btst #TIF_NEED_RESCHED,r0l
  295. beq restore_all:8
  296. mov.b @(LCCR+1:16,sp),r0l /* Interrupt Enabled? */
  297. bmi restore_all:8
  298. mov.l #PREEMPT_ACTIVE,er0
  299. mov.l er0,@(TI_PRE_COUNT:16,er4)
  300. andc #0x7f,ccr
  301. mov.l sp,er0
  302. jsr @SYMBOL_NAME(set_esp0)
  303. jsr @SYMBOL_NAME(schedule)
  304. orc #0x80,ccr
  305. bra need_resched:8
  306. #endif
  307. SYMBOL_NAME_LABEL(ret_from_fork)
  308. mov.l er2,er0
  309. jsr @SYMBOL_NAME(schedule_tail)
  310. jmp @SYMBOL_NAME(ret_from_exception)
  311. SYMBOL_NAME_LABEL(ret_from_kernel_thread)
  312. mov.l er2,er0
  313. jsr @SYMBOL_NAME(schedule_tail)
  314. mov.l @(LER4:16,sp),er0
  315. mov.l @(LER5:16,sp),er1
  316. jsr @er1
  317. jmp @SYMBOL_NAME(ret_from_exception)
  318. SYMBOL_NAME_LABEL(resume)
  319. /*
  320. * Beware - when entering resume, offset of tss is in d1,
  321. * prev (the current task) is in a0, next (the new task)
  322. * is in a1 and d2.b is non-zero if the mm structure is
  323. * shared between the tasks, so don't change these
  324. * registers until their contents are no longer needed.
  325. */
  326. /* save sr */
  327. sub.w r3,r3
  328. stc ccr,r3l
  329. mov.w r3,@(THREAD_CCR+2:16,er0)
  330. /* disable interrupts */
  331. orc #0x80,ccr
  332. mov.l @SYMBOL_NAME(sw_usp),er3
  333. mov.l er3,@(THREAD_USP:16,er0)
  334. mov.l sp,@(THREAD_KSP:16,er0)
  335. /* Skip address space switching if they are the same. */
  336. /* FIXME: what did we hack out of here, this does nothing! */
  337. mov.l @(THREAD_USP:16,er1),er0
  338. mov.l er0,@SYMBOL_NAME(sw_usp)
  339. mov.l @(THREAD_KSP:16,er1),sp
  340. /* restore status register */
  341. mov.w @(THREAD_CCR+2:16,er1),r3
  342. ldc r3l,ccr
  343. rts
  344. SYMBOL_NAME_LABEL(trace_break)
  345. subs #4,sp
  346. SAVE_ALL
  347. sub.l er1,er1
  348. dec.l #1,er1
  349. mov.l er1,@(LORIG,sp)
  350. mov.l sp,er0
  351. jsr @SYMBOL_NAME(set_esp0)
  352. mov.l @SYMBOL_NAME(sw_usp),er0
  353. mov.l @er0,er1
  354. mov.w @(-2:16,er1),r2
  355. cmp.w #0x5730,r2
  356. beq 1f
  357. subs #2,er1
  358. mov.l er1,@er0
  359. 1:
  360. and.w #0xff,e1
  361. mov.l er1,er0
  362. jsr @SYMBOL_NAME(trace_trap)
  363. jmp @SYMBOL_NAME(ret_from_exception)
  364. .section .bss
  365. SYMBOL_NAME_LABEL(sw_ksp)
  366. .space 4
  367. SYMBOL_NAME_LABEL(sw_usp)
  368. .space 4
  369. .end