  1. /*
  2. * linux/arch/m68knommu/platform/5307/entry.S
  3. *
  4. * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
  5. * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
  6. * Kenneth Albanowski <kjahds@kjahds.com>,
  7. * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
  8. * Copyright (C) 2004-2006 Macq Electronique SA. (www.macqel.com)
  9. *
  10. * Based on:
  11. *
  12. * linux/arch/m68k/kernel/entry.S
  13. *
  14. * Copyright (C) 1991, 1992 Linus Torvalds
  15. *
  16. * This file is subject to the terms and conditions of the GNU General Public
  17. * License. See the file README.legal in the main directory of this archive
  18. * for more details.
  19. *
  20. * Linux/m68k support by Hamish Macdonald
  21. *
  22. * 68060 fixes by Jesper Skov
  23. * ColdFire support by Greg Ungerer (gerg@snapgear.com)
  24. * 5307 fixes by David W. Miller
  25. * linux 2.4 support David McCullough <davidm@snapgear.com>
  26. * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
  27. */
  28. #include <linux/sys.h>
  29. #include <linux/linkage.h>
  30. #include <asm/unistd.h>
  31. #include <asm/thread_info.h>
  32. #include <asm/errno.h>
  33. #include <asm/setup.h>
  34. #include <asm/segment.h>
  35. #include <asm/asm-offsets.h>
  36. #include <asm/entry.h>
/*
 * Saved stack pointers.  The kernel keeps the user and kernel stack
 * pointers in memory and swaps them by hand on every kernel entry/exit
 * (see Lreturn and resume below) — presumably because this ColdFire
 * core has no separate user stack pointer register; confirm against
 * the CPU manual.
 */
.bss

sw_ksp:
	.long	0			/* saved kernel stack pointer */

sw_usp:
	.long	0			/* saved user stack pointer */

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl ret_from_interrupt
.globl inthandler
.globl fasthandler
/*
 * System call entry.  On entry %d0 holds the syscall number and
 * SAVE_ALL has pushed the user register state (pt_regs) onto the
 * kernel stack.  Invalid syscall numbers are routed through enosys
 * to sys_ni_syscall.
 *
 * NOTE: the traced path at 1: below does NOT end with a jump — it
 * falls through into ret_from_signal.
 */
enosys:
	mov.l	#sys_ni_syscall,%d3	/* bad/unimplemented syscall */
	bra	1f

ENTRY(system_call)
	SAVE_ALL
	move	#0x2000,%sr		/* enable intrs again */

	cmpl	#NR_syscalls,%d0
	jcc	enosys			/* number out of range */
	lea	sys_call_table,%a0
	lsll	#2,%d0			/* movel %a0@(%d0:l:4),%d3 */
	movel	%a0@(%d0),%d3		/* fetch handler address */
	jeq	enosys			/* NULL table entry */

1:
	movel	%sp,%d2			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d2	/* at start of kernel stack */
	movel	%d2,%a0
	movel	%sp,%a0@(THREAD_ESP0)	/* save top of frame */

	/* test the TIF_SYSCALL_TRACE bit inside thread_info->flags
	   (byte-wise access, hence the %8 and /8 index arithmetic) */
	btst	#(TIF_SYSCALL_TRACE%8),%a0@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	bnes	1f			/* being traced -> slow path */

	movel	%d3,%a0
	jbsr	%a0@			/* call the syscall handler */
	movel	%d0,%sp@(PT_D0)		/* save the return value */
	jra	ret_from_exception

1:
	/* traced: notify the tracer before and after the real call */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%d3,%a0
	jbsr	%a0@			/* call the syscall handler */
	movel	%d0,%sp@(PT_D0)		/* save the return value */
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	/* falls through to ret_from_signal */
/*
 * Common exception/syscall return paths.
 *
 * ret_from_signal:    unwinds the switch-stack pushed by the traced
 *                     syscall path / signal path, then falls into
 * ret_from_exception: decides between a return to kernel context
 *                     (plain frame pop + rte) and a return to user
 *                     space (work check, then stack switch in Lreturn).
 */
ret_from_signal:
	RESTORE_SWITCH_STACK
	addql	#4,%sp			/* pop dummy return address */

ret_from_exception:
	btst	#5,%sp@(PT_SR)		/* supervisor bit of saved SR */
	jeq	Luser_return		/* clear -> we came from user mode */

Lkernel_return:
	/* return to kernel: restore registers and drop the frame */
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	rte

Luser_return:
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
	movel	%d1,%a0
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	andl	#_TIF_WORK_MASK,%d1
	jne	Lwork_to_do		/* still work to do */

Lreturn:
	/*
	 * Return to user space: copy the hardware exception frame
	 * (format/vector/SR and PC) onto the user stack, pop the
	 * kernel-stack pt_regs, then switch %sp to the user stack.
	 * Interrupts must be off while the two stacks are juggled.
	 */
	move	#0x2700,%sr		/* disable intrs */
	movel	sw_usp,%a0		/* get usp */
	movel	%sp@(PT_PC),%a0@-	/* copy exception program counter */
	movel	%sp@(PT_FORMATVEC),%a0@- /* copy exception format/vector/sr */
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	addql	#8,%sp			/* remove exception */
	movel	%sp,sw_ksp		/* save ksp */
	subql	#8,sw_usp		/* set exception */
	movel	sw_usp,%sp		/* restore usp */
	rte

Lwork_to_do:
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

	/* GERG: do we need something here for TRACEing?? */

Lsignal_return:
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)	/* arg: pointer to pt_regs */
	clrl	%sp@-			/* arg: NULL (presumably oldset — verify do_signal proto) */
	jsr	do_signal
	addql	#8,%sp			/* pop the two arguments */
	RESTORE_SWITCH_STACK
	addql	#4,%sp			/* pop dummy return address */
	jmp	Lreturn
/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). It figures out the vector number and calls the appropriate
 * interrupt service routine directly.
 */
ENTRY(inthandler)
	SAVE_ALL
	moveq	#-1,%d0
	movel	%d0,%sp@(PT_ORIG_D0)	/* orig_d0 = -1: presumably "not a syscall" marker */
	addql	#1,local_irq_count	/* track interrupt nesting depth */

	movew	%sp@(PT_FORMATVEC),%d0	/* put exception # in d0 */
	andl	#0x03fc,%d0		/* mask out vector only (still scaled by 4) */

	leal	per_cpu__kstat+STAT_IRQ,%a0
	addql	#1,%a0@(%d0)		/* bump per-irq kstat counter */

	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%d1			/* calculate array offset */
	lsll	#4,%d1			/* irq_list entries are 16 bytes */
	lea	irq_list,%a0
	addl	%d1,%a0			/* pointer to array struct */

	/* build handler(vector, devid, regs) argument frame */
	movel	%sp,%sp@-		/* push regs arg onto stack */
	movel	%a0@(8),%sp@-		/* push devid arg (entry offset 8) */
	movel	%d0,%sp@-		/* push vector # on stack */
	movel	%a0@,%a0		/* get function to call */
	jbsr	%a0@			/* call vector handler */
	lea	%sp@(12),%sp		/* pop parameters off stack */

	bra	ret_from_interrupt	/* this was fallthrough */
/*
 * This is the fast interrupt handler (for certain hardware interrupt
 * sources). Unlike the normal interrupt handler it just uses the
 * current stack (doesn't care if it is user or kernel). It also
 * doesn't bother doing the bottom half handlers.
 *
 * Falls through into ret_from_interrupt after RESTORE_LOCAL.
 */
ENTRY(fasthandler)
	SAVE_LOCAL

	movew	%sp@(PT_FORMATVEC),%d0
	andl	#0x03fc,%d0		/* mask out vector only (still scaled by 4) */

	leal	per_cpu__kstat+STAT_IRQ,%a0
	addql	#1,%a0@(%d0)		/* bump per-irq kstat counter */

	/* build handler(vector, devid, regs) argument frame;
	   devid is NULL here, unlike the generic inthandler path */
	movel	%sp,%sp@-		/* push regs arg onto stack */
	clrl	%sp@-			/* push devid arg */
	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%sp@-		/* push vector # on stack */

	lsll	#4,%d0			/* adjust for array offset (16-byte entries) */
	lea	irq_list,%a0
	movel	%a0@(%d0),%a0		/* get function to call */
	jbsr	%a0@			/* call vector handler */
	lea	%sp@(12),%sp		/* pop parameters off stack */

	RESTORE_LOCAL
/*
 * Interrupt return: if this was a nested interrupt, or the interrupted
 * context was running at a raised interrupt level, just restore and
 * leave; otherwise run any pending softirqs before the normal
 * exception-return path.
 */
ENTRY(ret_from_interrupt)
	subql	#1,local_irq_count
	jeq	2f			/* count hit 0 -> outermost interrupt */
1:
	RESTORE_ALL
2:
	moveb	%sp@(PT_SR),%d0		/* high byte of saved SR (big-endian) */
	andl	#0x7,%d0		/* extract interrupt priority mask */
	jhi	1b			/* interrupted at IPL > 0 -> no softirqs */

	/* check if we need to do software interrupts */
	movel	irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0
	jeq	ret_from_exception

	/* tail-call: do_softirq will "return" to ret_from_exception */
	pea	ret_from_exception
	jmp	do_softirq
/*
 * Context switch (switch_to).
 *
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 *
 * Saves prev's SR, USP and kernel SP into its thread struct, switches
 * %sp to next's saved kernel stack, then restores next's USP and SR.
 * Returns (rts) on next's stack, i.e. into next's saved context.
 */
ENTRY(resume)
	movel	%a0, %d1		/* get prev thread in d1 */
	movew	%sr,%d0			/* save thread status reg */
	movew	%d0,%a0@(TASK_THREAD+THREAD_SR)

	oril	#0x700,%d0		/* disable interrupts */
	move	%d0,%sr

	movel	sw_usp,%d0		/* save usp */
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK

	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
	movel	%a0, sw_usp

	movew	%a1@(TASK_THREAD+THREAD_SR),%d0	/* restore thread status reg */
	movew	%d0, %sr
	rts