/* NOTE(review): removed web-listing artifacts (filename/size banner and a
   fused run of listing line numbers) that are not part of the source file. */
  1. /* -*- mode: asm -*-
  2. *
  3. * linux/arch/h8300/platform/h8300h/entry.S
  4. *
  5. * Yoshinori Sato <ysato@users.sourceforge.jp>
  6. * David McCullough <davidm@snapgear.com>
  7. *
  8. */
  9. /*
  10. * entry.S
  11. * include exception/interrupt gateway
  12. * system call entry
  13. */
  14. #include <linux/sys.h>
  15. #include <linux/config.h>
  16. #include <asm/unistd.h>
  17. #include <asm/setup.h>
  18. #include <asm/segment.h>
  19. #include <asm/linkage.h>
  20. #include <asm/asm-offsets.h>
  21. #include <asm/thread_info.h>
  22. #include <asm/errno.h>
.h8300h
/* CPU context save/restore macros. */
/*
 * SAVE_ALL - build a full register frame (pt_regs layout) on the kernel
 * stack.  Entered with the hardware exception frame (CCR + PC) and a
 * software-pushed vector slot already on the entry stack, er0 pushed first.
 *
 * CCR bit 4 (the CPU's user flag) is used by this port as a "kernel mode"
 * marker: it is tested on entry and set (orc #0x10) for the duration of
 * kernel execution.
 *
 * User-mode entry: the user SP is parked in sw_usp, the kernel stack is
 * loaded from sw_ksp, and the return PC / CCR are copied from the user
 * stack into the new kernel frame.
 * Kernel-mode entry: the frame is extended in place (a 2-byte dummy CCR
 * word keeps the layout identical to the user-mode case).
 */
.macro SAVE_ALL
mov.l er0,@-sp
stc ccr,r0l /* grab CCR to check kernel mode (bit 4) */
orc #0x10,ccr /* mark: now executing in kernel mode */
btst #4,r0l
bne 5f /* already in kernel mode: extend frame in place */
mov.l sp,@SYMBOL_NAME(sw_usp) /* user mode: park the user SP */
mov.l @sp,er0 /* recover er0 saved on the user stack */
mov.l @SYMBOL_NAME(sw_ksp),sp /* switch to the kernel stack */
sub.l #(LRET-LORIG),sp /* allocate LORIG - LRET */
mov.l er0,@-sp
mov.l er1,@-sp
mov.l @SYMBOL_NAME(sw_usp),er0
mov.l @(8:16,er0),er1 /* copy the RET addr from the user frame */
mov.l er1,@(LRET-LER1:16,sp)
mov.w e1,r1 /* e1 highbyte = ccr */
and #0xef,r1h /* clear the kernel-mode flag (bit 4) in the saved ccr */
sub.w r0,r0
mov.b r1h,r0l
mov.w r0,@(LCCR-LER1:16,sp) /* copy ccr */
mov.l @(LORIG-LER1:16,sp),er0
mov.l er0,@(LER0-LER1:16,sp) /* copy ER0 */
bra 6f
5:
mov.l @sp,er0 /* kernel mode: recover saved er0 */
subs #2,sp /* dummy ccr keeps frame layout uniform */
mov.l er0,@-sp
mov.l er1,@-sp
mov.w @(LRET-LER1:16,sp),r1 /* copy old ccr */
mov.b r1h,r1l
mov.b #0,r1h
mov.w r1,@(LCCR-LER1:16,sp) /* set ccr */
6:
mov.l er2,@-sp
mov.l er3,@-sp
mov.l er6,@-sp /* syscall arg #6 */
mov.l er5,@-sp /* syscall arg #5 */
mov.l er4,@-sp /* syscall arg #4 */
.endm
/*
 * RESTORE_ALL - unwind the pt_regs frame built by SAVE_ALL and return
 * with rte.  The saved CCR's bit 4 selects the path: returning to kernel
 * mode is a plain pop sequence; returning to user mode rebuilds the
 * hardware frame (CCR + PC) on the user stack, saves the kernel SP in
 * sw_ksp for the next entry, and switches back to the user stack.
 */
.macro RESTORE_ALL
mov.l @sp+,er4
mov.l @sp+,er5
mov.l @sp+,er6
mov.l @sp+,er3
mov.l @sp+,er2
mov.w @(LCCR-LER1:16,sp),r0 /* check kernel mode */
btst #4,r0l
bne 7f /* returning to kernel: simple unwind */
orc #0x80,ccr /* interrupts off while rebuilding the user frame */
mov.l @SYMBOL_NAME(sw_usp),er0
mov.l @(LER0-LER1:16,sp),er1 /* restore ER0 */
mov.l er1,@er0
mov.w @(LCCR-LER1:16,sp),r1 /* restore the RET addr */
mov.b r1l,r1h
mov.b @(LRET+1-LER1:16,sp),r1l
mov.w r1,e1
mov.w @(LRET+2-LER1:16,sp),r1
mov.l er1,@(8:16,er0)
mov.l @sp+,er1
add.l #(LRET-LER1),sp /* remove LORIG - LRET */
mov.l sp,@SYMBOL_NAME(sw_ksp) /* remember kernel SP for the next entry */
mov.l er0,sp /* back onto the user stack */
bra 8f
7:
mov.l @sp+,er1
adds #4,sp
adds #2,sp /* skip the dummy ccr word */
8:
mov.l @sp+,er0
adds #4,sp /* remove the sw created LVEC */
rte
.endm
  97. .globl SYMBOL_NAME(system_call)
  98. .globl SYMBOL_NAME(ret_from_exception)
  99. .globl SYMBOL_NAME(ret_from_fork)
  100. .globl SYMBOL_NAME(ret_from_interrupt)
  101. .globl SYMBOL_NAME(interrupt_redirect_table)
  102. .globl SYMBOL_NAME(sw_ksp),SYMBOL_NAME(sw_usp)
  103. .globl SYMBOL_NAME(resume)
  104. .globl SYMBOL_NAME(interrupt_redirect_table)
  105. .globl SYMBOL_NAME(interrupt_entry)
  106. .globl SYMBOL_NAME(system_call)
  107. .globl SYMBOL_NAME(trace_break)
  108. #if defined(CONFIG_ROMKERNEL)
  109. INTERRUPTS = 64
  110. .section .int_redirect,"ax"
  111. SYMBOL_NAME_LABEL(interrupt_redirect_table)
  112. .rept 7
  113. .long 0
  114. .endr
  115. jsr @SYMBOL_NAME(interrupt_entry) /* NMI */
  116. jmp @SYMBOL_NAME(system_call) /* TRAPA #0 (System call) */
  117. .long 0
  118. .long 0
  119. jmp @SYMBOL_NAME(trace_break) /* TRAPA #3 (breakpoint) */
  120. .rept INTERRUPTS-12
  121. jsr @SYMBOL_NAME(interrupt_entry)
  122. .endr
  123. #endif
  124. #if defined(CONFIG_RAMKERNEL)
  125. .globl SYMBOL_NAME(interrupt_redirect_table)
  126. .section .bss
  127. SYMBOL_NAME_LABEL(interrupt_redirect_table)
  128. .space 4
  129. #endif
.section .text
.align 2
/*
 * interrupt_entry - common interrupt gateway.
 * Reached via a jsr from the redirect table, so the pushed return
 * address identifies the vector.  Saves the register frame, converts
 * the return address into an IRQ number, calls process_int(irq, regs),
 * runs do_softirq() if softirqs are pending, then leaves through
 * ret_from_interrupt.
 */
SYMBOL_NAME_LABEL(interrupt_entry)
SAVE_ALL
mov.w @(LCCR,sp),r0
btst #4,r0l /* entered from kernel mode? */
bne 1f
mov.l @SYMBOL_NAME(sw_usp),er0 /* user mode: jsr return addr is on the user stack */
mov.l @(4:16,er0),er0
bra 2f
1:
mov.l @(LVEC,sp),er0 /* kernel mode: jsr return addr is in the saved frame */
2:
#if defined(CONFIG_ROMKERNEL)
sub.l #SYMBOL_NAME(interrupt_redirect_table),er0
#endif
#if defined(CONFIG_RAMKERNEL)
mov.l @SYMBOL_NAME(interrupt_redirect_table),er1
sub.l er1,er0
#endif
shlr.l er0 /* offset / 4 : each redirect slot is 4 bytes */
shlr.l er0
dec.l #1,er0 /* jsr pushed the *next* slot's address; back up one */
mov.l sp,er1
subs #4,er1 /* adjust ret_pc */
jsr @SYMBOL_NAME(process_int)
mov.l @SYMBOL_NAME(irq_stat)+CPUSTAT_SOFTIRQ_PENDING,er0
beq 1f /* nothing pending (mov.l set the Z flag) */
jsr @SYMBOL_NAME(do_softirq)
1:
jmp @SYMBOL_NAME(ret_from_interrupt)
  161. SYMBOL_NAME_LABEL(system_call)
  162. subs #4,sp /* dummy LVEC */
  163. SAVE_ALL
  164. mov.w @(LCCR:16,sp),r1
  165. bset #4,r1l
  166. ldc r1l,ccr
  167. mov.l er0,er4
  168. mov.l #-ENOSYS,er0
  169. mov.l er0,@(LER0:16,sp)
  170. /* save top of frame */
  171. mov.l sp,er0
  172. jsr @SYMBOL_NAME(set_esp0)
  173. cmp.l #NR_syscalls,er4
  174. bcc SYMBOL_NAME(ret_from_exception):16
  175. shll.l er4
  176. shll.l er4
  177. mov.l #SYMBOL_NAME(sys_call_table),er0
  178. add.l er4,er0
  179. mov.l @er0,er4
  180. beq SYMBOL_NAME(ret_from_exception):16
  181. mov.l sp,er2
  182. and.w #0xe000,r2
  183. mov.b @((TASK_FLAGS+3-(TIF_SYSCALL_TRACE >> 3)):16,er2),r2l
  184. btst #(TIF_SYSCALL_TRACE & 7),r2l
  185. bne 1f
  186. mov.l @(LER1:16,sp),er0
  187. mov.l @(LER2:16,sp),er1
  188. mov.l @(LER3:16,sp),er2
  189. jsr @er4
  190. mov.l er0,@(LER0:16,sp) /* save the return value */
  191. #if defined(CONFIG_SYSCALL_PRINT)
  192. jsr @SYMBOL_NAME(syscall_print)
  193. #endif
  194. bra SYMBOL_NAME(ret_from_exception):8
  195. 1:
  196. jsr SYMBOL_NAME(syscall_trace)
  197. mov.l @(LER1:16,sp),er0
  198. mov.l @(LER2:16,sp),er1
  199. mov.l @(LER3:16,sp),er2
  200. jsr @er4
  201. mov.l er0,@(LER0:16,sp) /* save the return value */
  202. jsr @SYMBOL_NAME(syscall_trace)
  203. bra SYMBOL_NAME(ret_from_exception):8
/*
 * ret_from_fork - first return path of a newly forked child.
 * er2 holds the previous task (presumably placed there by the context
 * switch / copy_thread code -- not visible here, verify against
 * process.c); hand it to schedule_tail, then take the normal
 * exception-return path.
 */
SYMBOL_NAME_LABEL(ret_from_fork)
mov.l er2,er0
jsr @SYMBOL_NAME(schedule_tail)
bra SYMBOL_NAME(ret_from_exception):8
/*
 * reschedule - taken from the return path when TIF_NEED_RESCHED is set.
 * Records the current frame pointer via set_esp0, calls schedule(), and
 * then falls straight through into ret_from_exception below.
 */
SYMBOL_NAME_LABEL(reschedule)
/* save top of frame */
mov.l sp,er0
jsr @SYMBOL_NAME(set_esp0)
jsr @SYMBOL_NAME(schedule)
/*
 * ret_from_exception / ret_from_interrupt - common return path.
 * Returning to kernel space: skip the reschedule/signal checks and
 * unwind (the CONFIG_PREEMPT section adds a kernel-preemption check).
 * Returning to user space: loop on TIF work flags, rescheduling and
 * delivering signals as needed, then restore and rte.
 */
SYMBOL_NAME_LABEL(ret_from_exception)
#if defined(CONFIG_PREEMPT)
orc #0x80,ccr /* interrupts off while inspecting flags */
#endif
SYMBOL_NAME_LABEL(ret_from_interrupt)
mov.b @(LCCR+1:16,sp),r0l
btst #4,r0l /* check if returning to kernel */
bne done:8 /* if so, skip resched, signals */
andc #0x7f,ccr /* user return: interrupts back on */
mov.l sp,er4
and.w #0xe000,r4 /* er4 = thread_info (assumes 8 KB-aligned stack) */
mov.l @(TI_FLAGS:16,er4),er1
and.l #_TIF_WORK_MASK,er1
beq done:8 /* no work pending */
1:
mov.l @(TI_FLAGS:16,er4),er1
btst #TIF_NEED_RESCHED,r1l
bne SYMBOL_NAME(reschedule):16
mov.l sp,er0 /* do_signal(regs, ...) */
subs #4,er0 /* adjust retpc */
mov.l er2,er1
jsr @SYMBOL_NAME(do_signal)
#if defined(CONFIG_PREEMPT)
bra done:8 /* userspace thoru */
/* NOTE(review): label 3 below has no visible incoming jump in this file */
3:
btst #4,r0l
beq done:8 /* userspace thoru */
4:
mov.l @(TI_PRE_COUNT:16,er4),er1
bne done:8 /* non-zero preempt count: not preemptible (mov.l sets Z) */
mov.l @(TI_FLAGS:16,er4),er1
btst #TIF_NEED_RESCHED,r1l
beq done:8
mov.b r0l,r0l /* retest saved CCR: sign bit = interrupt mask */
bpl done:8 /* interrupt off (exception path?) */
mov.l #PREEMPT_ACTIVE,er1
mov.l er1,@(TI_PRE_COUNT:16,er4)
andc #0x7f,ccr /* interrupts on around schedule() */
jsr @SYMBOL_NAME(schedule)
sub.l er1,er1
mov.l er1,@(TI_PRE_COUNT:16,er4) /* preempt count back to 0 */
orc #0x80,ccr
bra 4b:8
#endif
done:
RESTORE_ALL /* Does RTE */
SYMBOL_NAME_LABEL(resume)
/*
 * resume - task switch.
 * From the THREAD_* accesses below: er0 = prev task's thread area,
 * er1 = next task's thread area.  (The comment that used to be here
 * described the m68k register layout -- d1/a0/a1 -- which does not
 * apply to this H8/300H code.)
 * Saves CCR, the user SP (sw_usp) and the kernel SP into prev's thread
 * struct, then loads next's, with interrupts disabled across the stack
 * switch; the final ldc restores next's interrupt state.
 */
/* save ccr */
sub.w r3,r3
stc ccr,r3l
mov.w r3,@(THREAD_CCR+2:16,er0)
/* disable interrupts */
orc #0x80,ccr
mov.l @SYMBOL_NAME(sw_usp),er3
mov.l er3,@(THREAD_USP:16,er0) /* save prev's user SP */
mov.l sp,@(THREAD_KSP:16,er0) /* save prev's kernel SP */
/* Skip address space switching if they are the same. */
/* FIXME: what did we hack out of here, this does nothing! */
mov.l @(THREAD_USP:16,er1),er0
mov.l er0,@SYMBOL_NAME(sw_usp) /* load next's user SP */
mov.l @(THREAD_KSP:16,er1),sp /* switch to next's kernel stack */
/* restore status register */
mov.w @(THREAD_CCR+2:16,er1),r3
ldc r3l,ccr /* re-enables interrupts if next had them enabled */
rts
/*
 * trace_break - TRAPA #3 (breakpoint) gateway.
 * Builds a frame with ORIG_ER0 = -1 (marks it as not-a-syscall), backs
 * the saved user PC up over the 2-byte trapa instruction, then calls
 * trace_trap() with the break address and returns through the normal
 * exception path.
 */
SYMBOL_NAME_LABEL(trace_break)
subs #4,sp /* dummy LVEC */
SAVE_ALL
sub.l er1,er1
dec.l #1,er1 /* er1 = -1 */
mov.l er1,@(LORIG,sp) /* flag: no syscall to restart */
mov.l sp,er0
jsr @SYMBOL_NAME(set_esp0)
mov.l @SYMBOL_NAME(sw_usp),er0
mov.l @er0,er1 /* saved user PC */
subs #2,er1 /* back up over the trapa instruction */
mov.l er1,@er0
and.w #0xff,e1 /* NOTE(review): masks high PC bits -- presumably the 24-bit address space; confirm */
mov.l er1,er0
jsr @SYMBOL_NAME(trace_trap)
jmp @SYMBOL_NAME(ret_from_exception)
.section .bss
/* Stashed stack pointers used by SAVE_ALL/RESTORE_ALL when crossing
   the user/kernel boundary. */
SYMBOL_NAME_LABEL(sw_ksp) /* kernel SP, saved while running in user mode */
.space 4
SYMBOL_NAME_LABEL(sw_usp) /* user SP, saved while running in kernel mode */
.space 4