/*
 *  linux/arch/m68knommu/platform/5307/entry.S
 *
 *  Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
 *                      Kenneth Albanowski <kjahds@kjahds.com>,
 *  Copyright (C) 2000  Lineo Inc. (www.lineo.com)
 *  Copyright (C) 2004  Macq Electronique SA. (www.macqel.com)
 *
 * Based on:
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
 * 5307 fixes by David W. Miller
 * linux 2.4 support David McCullough <davidm@snapgear.com>
 * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
 */
#include <linux/config.h>
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
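
/*
 * sw_ksp and sw_usp are software copies of the kernel and user stack
 * pointers.  The ColdFire cores handled here have a single hardware
 * stack pointer, so the exception return path below switches stacks by
 * hand; the entry macros from <asm/entry.h> are likewise expected to
 * keep sw_usp current on kernel entry.
 */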
.bss

sw_ksp:
.long	0

sw_usp:
.long	0
.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl ret_from_interrupt
.globl inthandler
.globl fasthandler
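
/*
 * System call entry.  SAVE_ALL builds the pt_regs frame on the kernel
 * stack with the syscall number in %d0.  The default return value is
 * -LENOSYS; valid numbers are dispatched indirectly through
 * sys_call_table, with syscall_trace called before and after the
 * handler when TIF_SYSCALL_TRACE is set on the current thread.
 */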
ENTRY(system_call)
	SAVE_ALL
	move	#0x2000,%sr		/* enable intrs again */

	movel	#-LENOSYS,%d2
	movel	%d2,PT_D0(%sp)		/* default return value in d0 */
					/* original D0 is in orig_d0 */
	movel	%d0,%d2

	/* save top of frame */
	pea	%sp@
	jbsr	set_esp0
	addql	#4,%sp

	cmpl	#NR_syscalls,%d2
	jcc	ret_from_exception
	lea	sys_call_table,%a0
	lsll	#2,%d2			/* movel %a0@(%d2:l:4),%d3 */
	movel	%a0@(%d2),%d3
	jeq	ret_from_exception
	lsrl	#2,%d2

	movel	%sp,%d2			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d2	/* at start of kernel stack */
	movel	%d2,%a0
	btst	#TIF_SYSCALL_TRACE,%a0@(TI_FLAGS)
	bnes	1f

	movel	%d3,%a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_D0)		/* save the return value */
	jra	ret_from_exception
1:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%d3,%a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_D0)		/* save the return value */

	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace

ret_from_signal:
	RESTORE_SWITCH_STACK
	addql	#4,%sp

ret_from_exception:
	btst	#5,%sp@(PT_SR)		/* check if returning to kernel */
	jeq	Luser_return		/* if so, skip resched, signals */

Lkernel_return:
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	rte

Luser_return:
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
	movel	%d1,%a0
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	andl	#_TIF_WORK_MASK,%d1
	jne	Lwork_to_do		/* still work to do */
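
/*
 * Return to user space: interrupts are masked, a fresh exception frame
 * (format/vector/sr word pair followed by the pc) is pushed onto the
 * user stack via sw_usp, the saved registers are popped from the kernel
 * stack, the emptied kernel stack is remembered in sw_ksp, and the rte
 * executes with %sp pointing at the frame on the user stack.
 */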
Lreturn:
	move	#0x2700,%sr		/* disable intrs */
	movel	sw_usp,%a0		/* get usp */
	movel	%sp@(PT_PC),%a0@-	/* copy exception program counter */
	movel	%sp@(PT_FORMATVEC),%a0@- /* copy exception format/vector/sr */
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	addql	#8,%sp			/* remove exception */
	movel	%sp,sw_ksp		/* save ksp */
	subql	#8,sw_usp		/* set exception */
	movel	sw_usp,%sp		/* restore usp */
	rte

Lwork_to_do:
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

	/* GERG: do we need something here for TRACEing?? */

Lsignal_return:
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	clrl	%sp@-
	jsr	do_signal
	addql	#8,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jmp	Lreturn

/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). It figures out the vector number and calls the appropriate
 * interrupt service routine directly.
 */
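/*
 * The offset arithmetic below assumes that each irq_list entry is 16
 * bytes, with the handler pointer at offset 0 and the dev_id argument
 * at offset 8 (hence the shift by 4 and the %a0@(8) load).
 */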
ENTRY(inthandler)
	SAVE_ALL
	moveq	#-1,%d0
	movel	%d0,%sp@(PT_ORIG_D0)
	addql	#1,local_irq_count

	movew	%sp@(PT_FORMATVEC),%d0	/* put exception # in d0 */
	andl	#0x03fc,%d0		/* mask out vector only */

	leal	per_cpu__kstat+STAT_IRQ,%a0
	addql	#1,%a0@(%d0)

	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%d1			/* calculate array offset */
	lsll	#4,%d1
	lea	irq_list,%a0
	addl	%d1,%a0			/* pointer to array struct */

	movel	%sp,%sp@-		/* push regs arg onto stack */
	movel	%a0@(8),%sp@-		/* push devid arg */
	movel	%d0,%sp@-		/* push vector # on stack */

	movel	%a0@,%a0		/* get function to call */
	jbsr	%a0@			/* call vector handler */
	lea	%sp@(12),%sp		/* pop parameters off stack */

	bra	ret_from_interrupt	/* this was fallthrough */

/*
 * This is the fast interrupt handler (for certain hardware interrupt
 * sources). Unlike the normal interrupt handler it just uses the
 * current stack (doesn't care if it is user or kernel). It also
 * doesn't bother doing the bottom half handlers.
 */
ENTRY(fasthandler)
	SAVE_LOCAL

	movew	%sp@(PT_FORMATVEC),%d0
	andl	#0x03fc,%d0		/* mask out vector only */

	leal	per_cpu__kstat+STAT_IRQ,%a0
	addql	#1,%a0@(%d0)

	movel	%sp,%sp@-		/* push regs arg onto stack */
	clrl	%sp@-			/* push devid arg */
	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%sp@-		/* push vector # on stack */

	lsll	#4,%d0			/* adjust for array offset */
	lea	irq_list,%a0
	movel	%a0@(%d0),%a0		/* get function to call */
	jbsr	%a0@			/* call vector handler */
	lea	%sp@(12),%sp		/* pop parameters off stack */

	RESTORE_LOCAL
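
/*
 * Common interrupt exit.  Nested interrupts (local_irq_count still
 * non-zero) and returns to a context running with a non-zero interrupt
 * mask simply restore and rte.  Otherwise any pending softirqs are run
 * via do_softirq, with ret_from_exception pushed as the return address,
 * before the normal exception return path is taken.
 */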
ENTRY(ret_from_interrupt)
	subql	#1,local_irq_count
	jeq	2f
1:
	RESTORE_ALL
2:
	moveb	%sp@(PT_SR),%d0
	andl	#0x7,%d0
	jhi	1b

	/* check if we need to do software interrupts */
	movel	irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0
	jeq	ret_from_exception

	pea	ret_from_exception
	jmp	do_softirq

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
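/*
 * resume saves prev's status register, user stack pointer and (via
 * SAVE_SWITCH_STACK) callee-saved registers, then switches %sp to
 * next's saved kernel stack pointer and restores the same state for
 * next; the final rts resumes execution in the new thread.
 */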
ENTRY(resume)
	movel	%a0, %d1		/* get prev thread in d1 */
	movew	%sr,%d0			/* save thread status reg */
	movew	%d0,%a0@(TASK_THREAD+THREAD_SR)

	oril	#0x700,%d0		/* disable interrupts */
	move	%d0,%sr

	movel	sw_usp,%d0		/* save usp */
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK

	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
	movel	%a0, sw_usp
	movew	%a1@(TASK_THREAD+THREAD_SR),%d0	/* restore thread status reg */
	movew	%d0, %sr
	rts