/* -*- mode: asm -*-
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after a timer interrupt and after each system call.
 *
 */

/*
 * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
 *               all pointers that used to be 'current' are now entry
 *               number 0 in the 'current_set' list.
 *
 *  6/05/00 RZ:  added writeback completion after return from sighandler
 *               for 68040
 */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl __sys_fork, __sys_clone, __sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup

.text
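
| The fork/clone/vfork wrappers below follow the usual m68k convention
| (assuming the standard SAVE_SWITCH_STACK from <asm/entry.h>, which pushes
| the callee-saved registers %a3-%a6/%d6-%d7, i.e. 24 bytes): the extra
| frame lets copy_thread() see every register for the child, and the parent
| simply discards it afterwards - "lea %sp@(24),%sp" drops the 24 bytes,
| and __sys_clone drops 28 because it also pushed a pointer to the saved
| register frame as the argument for m68k_clone().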
ENTRY(__sys_fork)
	SAVE_SWITCH_STACK
	jbsr	sys_fork
	lea	%sp@(24),%sp
	rts

ENTRY(__sys_clone)
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	jbsr	m68k_clone
	lea	%sp@(28),%sp
	rts

ENTRY(__sys_vfork)
	SAVE_SWITCH_STACK
	jbsr	sys_vfork
	lea	%sp@(24),%sp
	rts
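
| The sigreturn wrappers push a full switch-stack frame so the C handlers
| can rewrite every register (including the callee-saved ones) from the
| signal frame; RESTORE_SWITCH_STACK then reloads whatever
| do_sigreturn()/do_rt_sigreturn() left there.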
ENTRY(sys_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_sigreturn
	RESTORE_SWITCH_STACK
	rts

ENTRY(sys_rt_sigreturn)
	SAVE_SWITCH_STACK
	jbsr	do_rt_sigreturn
	RESTORE_SWITCH_STACK
	rts

ENTRY(buserr)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	buserr_c
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(trap)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		| stack frame pointer argument
	jbsr	trap_c
	addql	#4,%sp
	jra	ret_from_exception

| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
	movel	%d1,%sp@-
	jsr	schedule_tail
	addql	#4,%sp
	jra	ret_from_exception

ENTRY(ret_from_kernel_thread)
	| a3 contains the kernel thread payload, d7 - its argument
	movel	%d1,%sp@-
	jsr	schedule_tail
	movel	%d7,(%sp)
	jsr	%a3@
	addql	#4,%sp
	jra	ret_from_exception

#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)

#ifdef TRAP_DBG_INTERRUPT

.globl dbginterrupt
ENTRY(dbginterrupt)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%sp,%sp@-		/* stack frame pointer argument */
	jsr	dbginterrupt_c
	addql	#4,%sp
	jra	ret_from_exception
#endif
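
| reschedule records the current frame top via set_esp0() and then
| tail-calls schedule(): the pea below pushes ret_from_exception as the
| return address, so schedule() "returns" straight into the exception
| exit path.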
ENTRY(reschedule)
	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp
	pea	ret_from_exception
	jmp	schedule

ENTRY(ret_from_user_signal)
	moveq	#__NR_sigreturn,%d0
	trap	#0

ENTRY(ret_from_user_rt_signal)
	movel	#__NR_rt_sigreturn,%d0
	trap	#0

#else
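
| Syscall tracing path: -ENOSYS is stored first so a tracer (e.g. strace)
| sees a sane default return value, then syscall_trace() runs and the
| (possibly rewritten) syscall number is reloaded from the saved orig_d0
| and checked against NR_syscalls again before dispatching.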
do_trace_entry:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)	| needed for strace
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%sp@(PT_OFF_ORIG_D0),%d0
	cmpl	#NR_syscalls,%d0
	jcs	syscall
badsys:
	movel	#-ENOSYS,%sp@(PT_OFF_D0)
	jra	ret_from_syscall

do_trace_exit:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jra	.Lret_from_exception

ENTRY(ret_from_signal)
	movel	%curptr@(TASK_STACK),%a1
	tstb	%a1@(TINFO_FLAGS+2)
	jge	1f
	jbsr	syscall_trace
1:	RESTORE_SWITCH_STACK
	addql	#4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
	subql	#7,%d0				| bus error frame ?
	jbne	1f
	movel	%sp,%sp@-
	jbsr	berr_040cleanup
	addql	#4,%sp
1:
#endif
	jra	.Lret_from_exception
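
| System call entry: the syscall number arrives in %d0.  After the trace
| and range checks, the memory-indirect jbsr below indexes sys_call_table
| by %d0 (scaled by 4) and jumps through the table entry; the handler's
| return value comes back in %d0 and is stored into the saved frame at
| PT_OFF_D0.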
ENTRY(system_call)
	SAVE_ALL_SYS

	GET_CURRENT(%d1)
	movel	%d1,%a1

	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)

	| syscall trace?
	tstb	%a1@(TINFO_FLAGS+2)
	jmi	do_trace_entry
	cmpl	#NR_syscalls,%d0
	jcc	badsys
syscall:
	jbsr	@(sys_call_table,%d0:l:4)@(0)
	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
ret_from_syscall:
	|oriw	#0x0700,%sr
	movel	%curptr@(TASK_STACK),%a1
	movew	%a1@(TINFO_FLAGS+2),%d0
	jne	syscall_exit_work
1:	RESTORE_ALL
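
| syscall_exit_work: %d0 still holds the thread_info flags halfword loaded
| above.  The shifts below move individual TIF bits into the condition
| codes (carry and sign) so each kind of pending work - syscall tracing,
| delayed trace, signal delivery, rescheduling - can be tested without
| reloading the flags.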
syscall_exit_work:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1b			| if so, skip resched, signals
	lslw	#1,%d0
	jcs	do_trace_exit
	jmi	do_delayed_trace
	lslw	#8,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule

ENTRY(ret_from_exception)
.Lret_from_exception:
	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
	bnes	1f			| if so, skip resched, signals
	| only allow interrupts when we are really the last one on the
	| kernel stack, otherwise stack overflow can occur during
	| heavy interrupt load
	andw	#ALLOWINT,%sr

resume_userspace:
	movel	%curptr@(TASK_STACK),%a1
	moveb	%a1@(TINFO_FLAGS+3),%d0
	jne	exit_work
1:	RESTORE_ALL

exit_work:
	| save top of frame
	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
	lslb	#1,%d0
	jne	do_signal_return
	pea	resume_userspace
	jra	schedule

do_signal_return:
	|andw	#ALLOWINT,%sr
	subql	#4,%sp			| dummy return address
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	bsrl	do_notify_resume
	addql	#4,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jbra	resume_userspace
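
| Delayed trace: clear the trace bit in the saved SR and deliver SIGTRAP -
| effectively send_sig(LSIGTRAP, current, 1), given the three arguments
| pushed below.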
do_delayed_trace:
	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
	pea	1			| send SIGTRAP
	movel	%curptr,%sp@-
	pea	LSIGTRAP
	jbsr	send_sig
	addql	#8,%sp
	addql	#4,%sp
	jbra	resume_userspace
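
| Interrupt entry/exit: the addqb/subqb on TINFO_PREEMPT+1 bump and drop
| the hardirq part of the preempt count (assuming the usual big-endian
| preempt_count layout), the bfextu extracts the vector number from the
| exception frame's format/vector word, and the *_fixup labels mark
| instruction words that the platform interrupt setup code may patch.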
/* This is the main interrupt handler for autovector interrupts */

ENTRY(auto_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
	subw	#VEC_SPUR,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		| put vector # on stack
auto_irqhandler_fixup = . + 2
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack

ret_from_interrupt:
	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
2:	RESTORE_ALL

	ALIGN
ret_from_last_interrupt:
	moveq	#(~ALLOWINT>>8)&0xff,%d0
	andb	%sp@(PT_OFF_SR),%d0
	jne	2b

	/* check if we need to do software interrupts */
	tstl	irq_stat+CPUSTAT_SOFTIRQ_PENDING
	jeq	.Lret_from_exception
	pea	ret_from_exception
	jra	do_softirq
/* Handler for user defined interrupt vectors */

ENTRY(user_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)
	| put exception # in d0
	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
	subw	#VEC_USER,%d0

	movel	%sp,%sp@-
	movel	%d0,%sp@-		| put vector # on stack
	jsr	do_IRQ			| process the IRQ
	addql	#8,%sp			| pop parameters off stack

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
	RESTORE_ALL

/* Handler for uninitialized and spurious interrupts */

ENTRY(bad_inthandler)
	SAVE_ALL_INT
	GET_CURRENT(%d0)
	movel	%d0,%a1
	addqb	#1,%a1@(TINFO_PREEMPT+1)

	movel	%sp,%sp@-
	jsr	handle_badint
	addql	#4,%sp

	movel	%curptr@(TASK_STACK),%a1
	subqb	#1,%a1@(TINFO_PREEMPT+1)
	jeq	ret_from_last_interrupt
	RESTORE_ALL
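
| resume is the guts of switch_to(): it saves the outgoing task's SR, fs,
| usp, callee-saved registers, kernel stack pointer and FPU state into its
| thread_struct, switches %curptr to the incoming task, and restores the
| same state from the new task's thread_struct.  The final rts continues
| wherever the new task last called resume (or in ret_from_fork for a
| fresh child, which expects the previous task in %d1).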
resume:
	/*
	 * Beware - when entering resume, prev (the current task) is
	 * in a0, next (the new task) is in a1, so don't change these
	 * registers until their contents are no longer needed.
	 */

	/* save sr */
	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)

	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
	movec	%sfc,%d0
	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)

	/* save usp */
	/* it is better to use a movel here instead of a movew 8*) */
	movec	%usp,%d0
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	/* save non-scratch registers on stack */
	SAVE_SWITCH_STACK

	/* save current kernel stack pointer */
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)
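
	| FPU note: fsave writes a NULL frame when the FPU holds no live
	| context, so the tstb checks below skip the fmovem dumps in that
	| case; the 68060 frame format differs (status in bits 15-8 of the
	| first longword, hence the +2 offset), which is why the 060 is
	| special-cased.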
	/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	3f
#endif
	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)

#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif /* CONFIG_M68KFPU_EMU_ONLY */

	/* Return previous task in %d1 */
	movel	%curptr,%d1

	/* switch to new task (a1 contains new task) */
	movel	%a1,%curptr

	/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
	tstl	m68k_fputype
	jeq	4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
	btst	#3,m68k_cputype+3
	beqs	1f
#endif
	/* The 060 FPU keeps status in bits 15-8 of the first longword */
	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
	jeq	3f
#if !defined(CPU_M68060_ONLY)
	jra	2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
	jeq	3f
#endif
2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif /* CONFIG_M68KFPU_EMU_ONLY */

	/* restore the kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp

	/* restore non-scratch registers */
	RESTORE_SWITCH_STACK

	/* restore user stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
	movel	%a0,%usp

	/* restore fs (sfc,%dfc) */
	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
	movec	%a0,%sfc
	movec	%a0,%dfc

	/* restore status register */
	movew	%a1@(TASK_THREAD+THREAD_SR),%sr

	rts

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */