  1. /* -*- mode: asm -*-
  2. *
  3. * linux/arch/h8300/platform/h8300h/entry.S
  4. *
  5. * Yoshinori Sato <ysato@users.sourceforge.jp>
  6. * David McCullough <davidm@snapgear.com>
  7. *
  8. */
  9. /*
  10. * entry.S
  11. * include exception/interrupt gateway
  12. * system call entry
  13. */
  14. #include <linux/sys.h>
  15. #include <asm/unistd.h>
  16. #include <asm/setup.h>
  17. #include <asm/segment.h>
  18. #include <asm/linkage.h>
  19. #include <asm/asm-offsets.h>
  20. #include <asm/thread_info.h>
  21. #include <asm/errno.h>
  22. .h8300h
  23. /* CPU context save/restore macros. */
/*
 * SAVE_ALL - build a pt_regs frame on the kernel stack on entry from
 * an exception, interrupt or trap.
 *
 * This port uses CCR bit 4 (tested with btst #4 throughout the file)
 * as its "kernel mode" marker.  When entered from user mode the user
 * stack pointer is parked in sw_usp and the kernel stack is fetched
 * from sw_ksp; when entered from kernel mode the frame is completed
 * in place on the current stack.
 */
.macro SAVE_ALL
	mov.l	er0,@-sp			/* free er0 as scratch */
	stc	ccr,r0l				/* check kernel mode */
	orc	#0x10,ccr			/* mark: now running in kernel mode */
	btst	#4,r0l				/* were we already in kernel mode? */
	bne	5f
	/* ---- entered from user mode ---- */
	mov.l	sp,@SYMBOL_NAME(sw_usp)		/* user mode: park user SP */
	mov.l	@sp,er0				/* recover caller's er0 */
	mov.l	@SYMBOL_NAME(sw_ksp),sp		/* switch to the kernel stack */
	sub.l	#(LRET-LORIG),sp		/* allocate LORIG - LRET */
	mov.l	er0,@-sp
	mov.l	er1,@-sp
	mov.l	@SYMBOL_NAME(sw_usp),er0
	mov.l	@(8:16,er0),er1			/* copy the RET addr (CPU frame on user stack) */
	mov.l	er1,@(LRET-LER1:16,sp)
	mov.w	e1,r1				/* e1 highbyte = ccr (pushed by the CPU) */
	and	#0xef,r1h			/* mask mode? flag */
	sub.w	r0,r0				/* zero r0 */
	mov.b	r1h,r0l
	mov.w	r0,@(LCCR-LER1:16,sp)		/* copy ccr into the frame */
	mov.l	@(LORIG-LER1:16,sp),er0
	mov.l	er0,@(LER0-LER1:16,sp)		/* copy ER0 (also kept as orig_er0) */
	bra	6f
5:
	/* ---- entered from kernel mode: frame stays on this stack ---- */
	mov.l	@sp,er0				/* kernel mode */
	subs	#2,sp				/* dummy ccr slot */
	mov.l	er0,@-sp
	mov.l	er1,@-sp
	mov.w	@(LRET-LER1:16,sp),r1		/* copy old ccr */
	mov.b	r1h,r1l
	mov.b	#0,r1h
	mov.w	r1,@(LCCR-LER1:16,sp)		/* set ccr */
6:
	/* common tail: remaining registers, in pt_regs order */
	mov.l	er2,@-sp
	mov.l	er3,@-sp
	mov.l	er6,@-sp			/* syscall arg #6 */
	mov.l	er5,@-sp			/* syscall arg #5 */
	mov.l	er4,@-sp			/* syscall arg #4 */
.endm
/*
 * RESTORE_ALL - tear down the frame built by SAVE_ALL and return via
 * RTE.  If the frame says we are going back to user mode, the CPU
 * return frame (CCR + PC) is rebuilt on the user stack, the kernel SP
 * is parked in sw_ksp, and the user SP is reloaded from sw_usp.
 */
.macro RESTORE_ALL
	mov.l	@sp+,er4
	mov.l	@sp+,er5
	mov.l	@sp+,er6
	mov.l	@sp+,er3
	mov.l	@sp+,er2
	mov.w	@(LCCR-LER1:16,sp),r0		/* check kernel mode */
	btst	#4,r0l
	bne	7f
	/* ---- returning to user mode ---- */
	orc	#0x80,ccr			/* interrupts off while juggling stacks */
	mov.l	@SYMBOL_NAME(sw_usp),er0
	mov.l	@(LER0-LER1:16,sp),er1		/* restore ER0 */
	mov.l	er1,@er0
	mov.w	@(LCCR-LER1:16,sp),r1		/* restore the RET addr */
	mov.b	r1l,r1h
	mov.b	@(LRET+1-LER1:16,sp),r1l
	mov.w	r1,e1				/* reassemble CCR:PC high word */
	mov.w	@(LRET+2-LER1:16,sp),r1
	mov.l	er1,@(8:16,er0)			/* rebuild CPU frame on the user stack */
	mov.l	@sp+,er1
	add.l	#(LRET-LER1),sp			/* remove LORIG - LRET */
	mov.l	sp,@SYMBOL_NAME(sw_ksp)		/* park kernel SP for the next entry */
	mov.l	er0,sp				/* back onto the user stack */
	bra	8f
7:
	/* ---- returning to kernel mode ---- */
	mov.l	@sp+,er1
	adds	#4,sp
	adds	#2,sp				/* drop the dummy ccr slot */
8:
	mov.l	@sp+,er0
	adds	#4,sp				/* remove the sw created LVEC */
	rte
.endm
/* Exported entry points and switch-stack pointers.
 * NOTE(review): several symbols are declared .globl twice
 * (system_call, interrupt_redirect_table) — harmless but redundant. */
.globl SYMBOL_NAME(system_call)
.globl SYMBOL_NAME(ret_from_exception)
.globl SYMBOL_NAME(ret_from_fork)
.globl SYMBOL_NAME(ret_from_interrupt)
.globl SYMBOL_NAME(interrupt_redirect_table)
.globl SYMBOL_NAME(sw_ksp),SYMBOL_NAME(sw_usp)
.globl SYMBOL_NAME(resume)
.globl SYMBOL_NAME(interrupt_redirect_table)
.globl SYMBOL_NAME(interrupt_entry)
.globl SYMBOL_NAME(system_call)
.globl SYMBOL_NAME(trace_break)
#if defined(CONFIG_ROMKERNEL)
/* ROM kernel: the redirect table itself lives here, one 4-byte slot
 * per vector; most slots funnel into interrupt_entry via jsr so the
 * pushed return address identifies the vector. */
INTERRUPTS = 64
.section .int_redirect,"ax"
SYMBOL_NAME_LABEL(interrupt_redirect_table)
	.rept	7
	.long	0				/* vectors 0-6: no redirection */
	.endr
	jsr	@SYMBOL_NAME(interrupt_entry)	/* NMI */
	jmp	@SYMBOL_NAME(system_call)	/* TRAPA #0 (System call) */
	.long	0
	.long	0
	jmp	@SYMBOL_NAME(trace_break)	/* TRAPA #3 (breakpoint) */
	.rept	INTERRUPTS-12
	jsr	@SYMBOL_NAME(interrupt_entry)	/* all remaining vectors */
	.endr
#endif
#if defined(CONFIG_RAMKERNEL)
/* RAM kernel: only a pointer to the runtime-built table is kept. */
.globl SYMBOL_NAME(interrupt_redirect_table)
.section .bss
SYMBOL_NAME_LABEL(interrupt_redirect_table)
	.space	4
#endif
.section .text
.align	2
/*
 * Common hardware-interrupt entry, reached via "jsr" from a redirect
 * table slot.  The jsr's pushed return address (saved in the frame as
 * LVEC, or still on the user stack for user-mode entry) identifies
 * which slot fired; the vector number is recovered from it, then
 * process_int(vector, regs) is called and pending softirqs are run.
 */
SYMBOL_NAME_LABEL(interrupt_entry)
	SAVE_ALL
	mov.w	@(LCCR,sp),r0
	btst	#4,r0l				/* interrupted in kernel mode? */
	bne	1f
	mov.l	@SYMBOL_NAME(sw_usp),er0	/* user mode: slot addr sits on the user stack */
	mov.l	@(4:16,er0),er0
	bra	2f
1:
	mov.l	@(LVEC,sp),er0			/* kernel mode: slot addr is in the frame */
2:
#if defined(CONFIG_ROMKERNEL)
	sub.l	#SYMBOL_NAME(interrupt_redirect_table),er0
#endif
#if defined(CONFIG_RAMKERNEL)
	mov.l	@SYMBOL_NAME(interrupt_redirect_table),er1
	sub.l	er1,er0
#endif
	shlr.l	er0
	shlr.l	er0				/* byte offset / 4 = slot past the jsr */
	dec.l	#1,er0				/* return addr points past the slot: -1 -> vector */
	mov.l	sp,er1
	subs	#4,er1				/* adjust ret_pc (regs arg for process_int) */
	jsr	@SYMBOL_NAME(process_int)
	mov.l	@SYMBOL_NAME(irq_stat)+CPUSTAT_SOFTIRQ_PENDING,er0
	beq	1f				/* nothing pending */
	jsr	@SYMBOL_NAME(do_softirq)
1:
	jmp	@SYMBOL_NAME(ret_from_interrupt)
  160. SYMBOL_NAME_LABEL(system_call)
  161. subs #4,sp /* dummy LVEC */
  162. SAVE_ALL
  163. mov.w @(LCCR:16,sp),r1
  164. bset #4,r1l
  165. ldc r1l,ccr
  166. mov.l er0,er4
  167. mov.l #-ENOSYS,er0
  168. mov.l er0,@(LER0:16,sp)
  169. /* save top of frame */
  170. mov.l sp,er0
  171. jsr @SYMBOL_NAME(set_esp0)
  172. cmp.l #NR_syscalls,er4
  173. bcc SYMBOL_NAME(ret_from_exception):16
  174. shll.l er4
  175. shll.l er4
  176. mov.l #SYMBOL_NAME(sys_call_table),er0
  177. add.l er4,er0
  178. mov.l @er0,er4
  179. beq SYMBOL_NAME(ret_from_exception):16
  180. mov.l sp,er2
  181. and.w #0xe000,r2
  182. mov.b @((TASK_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
  183. btst #(TIF_SYSCALL_TRACE & 7),r2l
  184. bne 1f
  185. mov.l @(LER1:16,sp),er0
  186. mov.l @(LER2:16,sp),er1
  187. mov.l @(LER3:16,sp),er2
  188. jsr @er4
  189. mov.l er0,@(LER0:16,sp) /* save the return value */
  190. #if defined(CONFIG_SYSCALL_PRINT)
  191. jsr @SYMBOL_NAME(syscall_print)
  192. #endif
  193. bra SYMBOL_NAME(ret_from_exception):8
  194. 1:
  195. jsr SYMBOL_NAME(syscall_trace)
  196. mov.l @(LER1:16,sp),er0
  197. mov.l @(LER2:16,sp),er1
  198. mov.l @(LER3:16,sp),er2
  199. jsr @er4
  200. mov.l er0,@(LER0:16,sp) /* save the return value */
  201. jsr @SYMBOL_NAME(syscall_trace)
  202. bra SYMBOL_NAME(ret_from_exception):8
/*
 * Child's first return after fork: run schedule_tail(prev), then take
 * the normal exception-return path.  er2 holds the argument passed to
 * schedule_tail — NOTE(review): presumably prev, set up by the switch
 * code outside this file; confirm against copy_thread/resume.
 */
SYMBOL_NAME_LABEL(ret_from_fork)
	mov.l	er2,er0				/* arg for schedule_tail */
	jsr	@SYMBOL_NAME(schedule_tail)
	bra	SYMBOL_NAME(ret_from_exception):8
/*
 * Branched to from the return path when TIF_NEED_RESCHED is set.
 * Records the frame pointer via set_esp0(), calls schedule(), then
 * falls straight through into ret_from_exception below.
 */
SYMBOL_NAME_LABEL(reschedule)
	/* save top of frame */
	mov.l	sp,er0
	jsr	@SYMBOL_NAME(set_esp0)
	jsr	@SYMBOL_NAME(schedule)
/*
 * Common return path for exceptions and interrupts.
 * Kernel-mode returns skip straight to the frame restore (after an
 * optional kernel-preemption check under CONFIG_PREEMPT).  User-mode
 * returns re-enable interrupts and handle reschedule requests and
 * pending signals first.
 */
SYMBOL_NAME_LABEL(ret_from_exception)
#if defined(CONFIG_PREEMPT)
	orc	#0x80,ccr			/* interrupts off for the preempt check */
#endif
SYMBOL_NAME_LABEL(ret_from_interrupt)
	mov.b	@(LCCR+1:16,sp),r0l
	btst	#4,r0l				/* check if returning to kernel */
	bne	done:8				/* if so, skip resched, signals */
	andc	#0x7f,ccr			/* enable interrupts */
	mov.l	sp,er4
	and.w	#0xe000,r4			/* er4 = thread_info (8k-aligned stack) */
	mov.l	@(TI_FLAGS:16,er4),er1
	and.l	#_TIF_WORK_MASK,er1		/* any work pending? */
	beq	done:8
1:
	mov.l	@(TI_FLAGS:16,er4),er1
	btst	#TIF_NEED_RESCHED,r1l
	bne	SYMBOL_NAME(reschedule):16
	mov.l	sp,er0				/* regs argument */
	subs	#4,er0				/* adjust retpc */
	mov.l	er2,er1
	jsr	@SYMBOL_NAME(do_signal)
#if defined(CONFIG_PREEMPT)
	bra	done:8				/* userspace: through to restore */
3:	/* NOTE(review): label 3 appears unreferenced in this file */
	btst	#4,r0l
	beq	done:8				/* userspace: through to restore */
4:
	mov.l	@(TI_PRE_COUNT:16,er4),er1
	bne	done:8				/* preemption disabled */
	mov.l	@(TI_FLAGS:16,er4),er1
	btst	#TIF_NEED_RESCHED,r1l
	beq	done:8				/* no resched requested */
	mov.b	r0l,r0l				/* retest saved CCR */
	bpl	done:8				/* interrupt off (exception path?) */
	mov.l	#PREEMPT_ACTIVE,er1
	mov.l	er1,@(TI_PRE_COUNT:16,er4)	/* block recursive preemption */
	andc	#0x7f,ccr			/* interrupts on for schedule() */
	jsr	@SYMBOL_NAME(schedule)
	sub.l	er1,er1
	mov.l	er1,@(TI_PRE_COUNT:16,er4)	/* preempt count back to 0 */
	orc	#0x80,ccr			/* interrupts off, recheck */
	bra	4b:8
#endif
done:
	RESTORE_ALL				/* Does RTE */
SYMBOL_NAME_LABEL(resume)
/*
 * resume - task switch.
 * From the save/load pattern below: er0 = outgoing (prev) thread
 * base, er1 = incoming (next) thread base.
 * NOTE(review): the previous comment here described m68k registers
 * (d1/a0/a1/d2.b) and was stale for this H8/300 port — replaced.
 * Saves CCR, user SP (sw_usp) and kernel SP into prev's thread
 * struct, loads next's, and returns on next's kernel stack.
 */
	/* save ccr of the outgoing task */
	sub.w	r3,r3
	stc	ccr,r3l
	mov.w	r3,@(THREAD_CCR+2:16,er0)
	/* disable interrupts while switching stacks */
	orc	#0x80,ccr
	mov.l	@SYMBOL_NAME(sw_usp),er3
	mov.l	er3,@(THREAD_USP:16,er0)	/* save user SP */
	mov.l	sp,@(THREAD_KSP:16,er0)		/* save kernel SP */
	/* Skip address space switching if they are the same. */
	/* FIXME: what did we hack out of here, this does nothing! */
	mov.l	@(THREAD_USP:16,er1),er0
	mov.l	er0,@SYMBOL_NAME(sw_usp)	/* install next's user SP */
	mov.l	@(THREAD_KSP:16,er1),sp		/* switch to next's kernel stack */
	/* restore status register (may re-enable interrupts) */
	mov.w	@(THREAD_CCR+2:16,er1),r3
	ldc	r3l,ccr
	rts
/*
 * TRAPA #3 entry: debug breakpoint.
 * Builds a frame, marks orig_er0 as -1 (not a syscall), rewinds the
 * saved user PC over the 2-byte trapa instruction, then calls
 * trace_trap(pc) and takes the normal exception return.
 */
SYMBOL_NAME_LABEL(trace_break)
	subs	#4,sp				/* dummy LVEC */
	SAVE_ALL
	sub.l	er1,er1
	dec.l	#1,er1				/* er1 = -1 */
	mov.l	er1,@(LORIG,sp)			/* orig_er0 = -1: not a syscall */
	mov.l	sp,er0
	jsr	@SYMBOL_NAME(set_esp0)		/* record top of frame */
	mov.l	@SYMBOL_NAME(sw_usp),er0
	mov.l	@er0,er1			/* saved CCR+PC word on the user stack */
	subs	#2,er1				/* back up over the trapa */
	mov.l	er1,@er0
	and.w	#0xff,e1			/* strip the CCR byte, leave the PC */
	mov.l	er1,er0				/* arg: faulting PC */
	jsr	@SYMBOL_NAME(trace_trap)
	jmp	@SYMBOL_NAME(ret_from_exception)
.section .bss
/* Kernel stack pointer, parked here while running in user mode
 * (loaded by SAVE_ALL, stored by RESTORE_ALL). */
SYMBOL_NAME_LABEL(sw_ksp)
	.space	4
/* User stack pointer, parked here while running in kernel mode. */
SYMBOL_NAME_LABEL(sw_usp)
	.space	4