/* arch/m68k/kernel/entry_mm.S */
/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */
  33. #include <linux/linkage.h>
  34. #include <asm/entry.h>
  35. #include <asm/errno.h>
  36. #include <asm/setup.h>
  37. #include <asm/segment.h>
  38. #include <asm/traps.h>
  39. #include <asm/unistd.h>
  40. #include <asm/asm-offsets.h>
  41. .globl system_call, buserr, trap, resume
  42. .globl sys_call_table
  43. .globl sys_fork, sys_clone, sys_vfork
  44. .globl ret_from_interrupt, bad_interrupt
  45. .globl auto_irqhandler_fixup
  46. .globl user_irqvec_fixup, user_irqhandler_fixup
  47. .text
| Bus-error exception entry: save the full interrupt frame, then hand a
| pointer to it to the C handler buserr_c() and take the common
| exception-return path.
ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	bsrl	buserr_c
	addql	#4,%sp			| pop the frame-pointer argument
	jra	.Lret_from_exception
| Generic trap/exception entry: save the full interrupt frame, then hand
| a pointer to it to the C handler trap_c() and take the common
| exception-return path.
ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	bsrl	trap_c
	addql	#4,%sp			| pop the frame-pointer argument
	jra	.Lret_from_exception
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-		| previous task is schedule_tail()'s argument
	jsr	schedule_tail
	addql	#4,%sp			| pop the argument
	jra	.Lret_from_exception
| Syscall entry when the trace flag is set: report the entry to the
| tracer via syscall_trace(), then re-validate the syscall number (the
| tracer may have changed it).  Falls through to badsys on a number
| that is out of range.
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
	subql	#4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| pop the dummy return address
	movel	%sp@(PT_OFF_ORIG_D0),%d0	| reload syscall number
	cmpl	#NR_syscalls,%d0
	jcs	syscall			| in range (unsigned) -> dispatch

| Invalid syscall number: return -ENOSYS to the caller.
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall
| Syscall exit when the trace flag is set: report the exit (and result)
| to the tracer, then take the common exception-return path.
do_trace_exit:
	subql	#4,%sp			| dummy return address slot
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| pop the dummy return address
	jra	.Lret_from_exception
| Return path taken after signal delivery.  Notifies the tracer if the
| trace flag is set, then (on 68040) completes any pending writebacks
| left over from a bus-error frame before the normal exception return.
ENTRY(ret_from_signal)
	tstb	%curptr@(TASK_INFO+TINFO_FLAGS+2)	| trace flag (bit 7) set?
	jge	1f			| no -> skip tracer notification
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0	| frame format nibble
	subql	#7,%d0			| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-		| frame pointer argument
	jbsr	berr_040cleanup
	addql	#4,%sp			| pop the argument
1:
#endif
	jra	.Lret_from_exception
| System-call entry point.  The syscall number arrives in %d0; the
| handler is called through the memory-indirect dispatch into
| sys_call_table, and its result is stored back into the saved %d0 slot
| of the frame.
ENTRY(system_call)
	SAVE_ALL_SYS
	GET_CURRENT(%d1)
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	| syscall trace?
	tstb	%curptr@(TASK_INFO+TINFO_FLAGS+2)
	jmi	do_trace_entry		| trace flag (bit 7 of this byte) set
	cmpl	#NR_syscalls,%d0
	jcc	badsys			| out of range (unsigned compare)
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)	| indirect call through the table
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
| Common syscall return: restore immediately if no work-pending flags
| are set, otherwise dispatch on the flag bits (shifted into the C and
| N condition codes) to tracing, delayed trace, signal delivery, or
| rescheduling.
ret_from_syscall:
	|oriw	#0x0700,%sr
	movew	%curptr@(TASK_INFO+TINFO_FLAGS+2),%d0	| work-pending flags?
	jne	syscall_exit_work
1:	RESTORE_ALL

syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0			| flag bit 15 -> C, bit 14 -> N
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0			| original bit 6 -> N
	jmi	do_signal_return
	pea	resume_userspace	| schedule() returns via resume_userspace
	jra	schedule
| Common exception-return path.  Returns straight to the kernel if the
| saved SR shows supervisor mode; otherwise re-enables interrupts and
| checks the thread flags for pending signal/reschedule work before
| restoring the user context.
ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	moveb	%curptr@(TASK_INFO+TINFO_FLAGS+3),%d0	| low flag byte: work pending?
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0			| flag bit 6 -> N
	jmi	do_signal_return
	pea	resume_userspace	| schedule() returns via resume_userspace
	jra	schedule
| Deliver pending signals: build the full switch-stack frame and call
| do_signal() with a pointer to the saved registers, then re-check for
| more work via resume_userspace.
do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| register-frame pointer argument
	bsrl	do_signal
	addql	#4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	addql	#4,%sp			| pop the dummy return address
	jbra	resume_userspace
| Delayed single-step trap: clear the trace bit in the saved SR and
| deliver SIGTRAP to the current task via send_sig(LSIGTRAP, current, 1).
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp			| pop the three arguments ...
	addql	#4,%sp			| ... (8 + 4 bytes)
	jbra	resume_userspace
/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	addqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)	| bump hardirq nesting count
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0	| vector field of format/vector word
	subw	#VEC_SPUR,%d0		| re-base at the spurious-interrupt vector
	movel	%sp,%sp@-		| pt_regs pointer argument
	movel	%d0,%sp@-		| put vector # on stack
auto_irqhandler_fixup = . + 2		| address of the jsr operand (patchable)
	jsr	__m68k_handle_int	| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	| falls through to ret_from_interrupt
| Interrupt return: drop the hardirq nesting count; only when the last
| nested interrupt unwinds (count reaches zero) do we consider softirqs
| and the full exception-return path.
ret_from_interrupt:
	subqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)	| drop hardirq nesting count
	jeq	ret_from_last_interrupt	| left the outermost interrupt?
2:	RESTORE_ALL

	ALIGN
ret_from_last_interrupt:
	moveq	#(~ALLOWINT>>8)&0xff,%d0
	andb	%sp@(PT_OFF_SR),%d0	| interrupted context had IRQs masked?
	jne	2b			| then just restore and return

	/* check if we need to do software interrupts */
	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
	jeq	.Lret_from_exception
	pea	ret_from_exception	| do_softirq() returns through here
	jra	do_softirq
/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	addqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)	| bump hardirq nesting count
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0	| vector field of format/vector word
user_irqvec_fixup = . + 2		| address of the subw immediate (patchable)
	subw	#VEC_USER,%d0		| re-base at the first user vector
	movel	%sp,%sp@-		| pt_regs pointer argument
	movel	%d0,%sp@-		| put vector # on stack
user_irqhandler_fixup = . + 2		| address of the jsr operand (patchable)
	jsr	__m68k_handle_int	| process the IRQ
	addql	#8,%sp			| pop parameters off stack
	subqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)	| drop hardirq nesting count
	jeq	ret_from_last_interrupt
	RESTORE_ALL
/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	addqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)	| bump hardirq nesting count

	movel	%sp,%sp@-		| pt_regs pointer argument
	jsr	handle_badint
	addql	#4,%sp			| pop the argument

	subqb	#1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)	| drop hardirq nesting count
	jeq	ret_from_last_interrupt
	RESTORE_ALL
| sys_fork: build the full switch-stack frame and pass a pointer to the
| saved registers to the C implementation m68k_fork().
ENTRY(sys_fork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| register-frame pointer argument
	jbsr	m68k_fork
	addql	#4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	rts
| sys_clone: build the full switch-stack frame and pass a pointer to the
| saved registers to the C implementation m68k_clone().
ENTRY(sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| register-frame pointer argument
	jbsr	m68k_clone
	addql	#4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	rts
| sys_vfork: build the full switch-stack frame and pass a pointer to the
| saved registers to the C implementation m68k_vfork().
ENTRY(sys_vfork)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	| register-frame pointer argument
	jbsr	m68k_vfork
	addql	#4,%sp			| pop the argument
	RESTORE_SWITCH_STACK
	rts
| sys_sigreturn: save the remaining registers so do_sigreturn() can
| rewrite the whole register frame from the signal context.
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_sigreturn
	RESTORE_SWITCH_STACK
	rts
| sys_rt_sigreturn: save the remaining registers so do_rt_sigreturn()
| can rewrite the whole register frame from the signal context.
ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	rts
/*
 * resume: hand-coded task switch.  Saves the outgoing task's SR, fs
 * (sfc/dfc), usp, non-scratch registers, kernel stack pointer, and FPU
 * state into its thread struct, switches %curptr and the kernel stack
 * to the incoming task, and restores the same state from it.  Returns
 * the previous task in %d1 (consumed by ret_from_fork/schedule_tail).
 */
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1,so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)

	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU -> nothing to save
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| NULL frame: FPU untouched, skip register save
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)	| NULL fsave frame?
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype		| no hardware FPU -> nothing to restore
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3	| running on a 68060?
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f			| NULL frame: only frestore needed
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)	| NULL fsave frame?
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif	/* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts