
/*
 *  linux/arch/m68knommu/platform/5307/entry.S
 *
 *  Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
 *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
 *                      Kenneth Albanowski <kjahds@kjahds.com>,
 *  Copyright (C) 2000  Lineo Inc. (www.lineo.com)
 *  Copyright (C) 2004-2006  Macq Electronique SA. (www.macqel.com)
 *
 * Based on:
 *
 *  linux/arch/m68k/kernel/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file README.legal in the main directory of this archive
 * for more details.
 *
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 * ColdFire support by Greg Ungerer (gerg@snapgear.com)
 * 5307 fixes by David W. Miller
 * linux 2.4 support David McCullough <davidm@snapgear.com>
 * Bug, speed and maintainability fixes by Philippe De Muyter <phdm@macqel.be>
 */

#include <linux/config.h>
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>

.bss
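
/*
 * These ColdFire parts have no separate user stack pointer register,
 * so software copies of the kernel and user stack pointers are kept
 * here and switched by hand on every kernel entry and exit.
 */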
sw_ksp:
	.long	0
sw_usp:
	.long	0

.text

.globl system_call
.globl resume
.globl ret_from_exception
.globl ret_from_signal
.globl sys_call_table
.globl ret_from_interrupt
.globl inthandler
.globl fasthandler

enosys:
	mov.l	#sys_ni_syscall,%d3
	bra	1f

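/*
 * System call entry point.  The syscall number arrives in %d0; the
 * handler address is looked up in sys_call_table and called with
 * interrupts re-enabled.  Roughly, as a C sketch of the dispatch:
 *
 *	if (d0 >= NR_syscalls || !(handler = sys_call_table[d0]))
 *		handler = sys_ni_syscall;
 *	regs->d0 = handler(...);
 */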
ENTRY(system_call)
	SAVE_ALL
	move	#0x2000,%sr		/* enable intrs again */

	cmpl	#NR_syscalls,%d0
	jcc	enosys
	lea	sys_call_table,%a0
	lsll	#2,%d0			/* movel %a0@(%d0:l:4),%d3 */
	movel	%a0@(%d0),%d3
	jeq	enosys

1:
	movel	%sp,%d2			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d2	/* at start of kernel stack */
	movel	%d2,%a0
	movel	%sp,%a0@(THREAD_ESP0)	/* save top of frame */
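	/*
	 * On a memory operand btst tests a bit within a single byte, so
	 * index the byte of the big-endian TI_FLAGS long that holds
	 * TIF_SYSCALL_TRACE and test that bit; in C this is just
	 * (current_thread_info()->flags & _TIF_SYSCALL_TRACE).
	 */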
	btst	#(TIF_SYSCALL_TRACE%8),%a0@(TI_FLAGS+(31-TIF_SYSCALL_TRACE)/8)
	bnes	1f

	movel	%d3,%a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_D0)		/* save the return value */
	jra	ret_from_exception
1:
	subql	#4,%sp
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	movel	%d3,%a0
	jbsr	%a0@
	movel	%d0,%sp@(PT_D0)		/* save the return value */

	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	jbsr	syscall_trace
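	/* fall through into ret_from_signal to unwind the switch_stack */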

ret_from_signal:
	RESTORE_SWITCH_STACK
	addql	#4,%sp

ret_from_exception:
	btst	#5,%sp@(PT_SR)		/* check if returning to kernel */
	jeq	Luser_return		/* if not, handle resched, signals */

Lkernel_return:
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	rte

Luser_return:
	movel	%sp,%d1			/* get thread_info pointer */
	andl	#-THREAD_SIZE,%d1	/* at base of kernel stack */
	movel	%d1,%a0
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	andl	#_TIF_WORK_MASK,%d1
	jne	Lwork_to_do		/* still work to do */
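
/*
 * Return to user space: rebuild the two-longword ColdFire exception
 * frame (format/vector/sr long, then pc) on the user stack, remember
 * the kernel stack in sw_ksp for the next entry, then rte off the
 * user stack.
 */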
Lreturn:
	move	#0x2700,%sr		/* disable intrs */
	movel	sw_usp,%a0		/* get usp */
	movel	%sp@(PT_PC),%a0@-	/* copy exception program counter */
	movel	%sp@(PT_FORMATVEC),%a0@- /* copy exception format/vector/sr */
	moveml	%sp@,%d1-%d5/%a0-%a2
	lea	%sp@(32),%sp		/* space for 8 regs */
	movel	%sp@+,%d0
	addql	#4,%sp			/* orig d0 */
	addl	%sp@+,%sp		/* stk adj */
	addql	#8,%sp			/* remove exception */
	movel	%sp,sw_ksp		/* save ksp */
	subql	#8,sw_usp		/* account for exception frame we built */
	movel	sw_usp,%sp		/* restore usp */
	rte

Lwork_to_do:
	movel	%a0@(TI_FLAGS),%d1	/* get thread_info->flags */
	btst	#TIF_NEED_RESCHED,%d1
	jne	reschedule

	/* GERG: do we need something here for TRACEing?? */

Lsignal_return:
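	/*
	 * Build a switch_stack under pt_regs so the signal code can see
	 * every register, then call do_signal() with a NULL old sigset
	 * and the pt_regs pointer (regs pushed first, NULL on top).
	 */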
	subql	#4,%sp			/* dummy return address */
	SAVE_SWITCH_STACK
	pea	%sp@(SWITCH_STACK_SIZE)
	clrl	%sp@-
	jsr	do_signal
	addql	#8,%sp
	RESTORE_SWITCH_STACK
	addql	#4,%sp
	jmp	Lreturn

/*
 * This is the generic interrupt handler (for all hardware interrupt
 * sources). It figures out the vector number and calls the appropriate
 * interrupt service routine directly.
 */
ENTRY(inthandler)
	SAVE_ALL
	moveq	#-1,%d0
	movel	%d0,%sp@(PT_ORIG_D0)
	addql	#1,local_irq_count

	movew	%sp@(PT_FORMATVEC),%d0	/* put exception # in d0 */
	andl	#0x03fc,%d0		/* mask out vector only */

	leal	per_cpu__kstat+STAT_IRQ,%a0
	addql	#1,%a0@(%d0)

	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%d1			/* calculate array offset */
	lsll	#4,%d1
	lea	irq_list,%a0
	addl	%d1,%a0			/* pointer to array struct */
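	/*
	 * Each irq_list entry is 16 bytes (hence the lsll #4), with the
	 * handler function at offset 0 and the devid cookie at offset 8.
	 */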
	movel	%sp,%sp@-		/* push regs arg onto stack */
	movel	%a0@(8),%sp@-		/* push devid arg */
	movel	%d0,%sp@-		/* push vector # on stack */
	movel	%a0@,%a0		/* get function to call */
	jbsr	%a0@			/* call vector handler */
	lea	%sp@(12),%sp		/* pop parameters off stack */

	bra	ret_from_interrupt	/* this was fallthrough */

/*
 * This is the fast interrupt handler (for certain hardware interrupt
 * sources). Unlike the normal interrupt handler it just uses the
 * current stack (doesn't care if it is user or kernel). It also
 * doesn't bother doing the bottom half handlers.
 */
ENTRY(fasthandler)
	SAVE_LOCAL

	movew	%sp@(PT_FORMATVEC),%d0
	andl	#0x03fc,%d0		/* mask out vector only */

	leal	per_cpu__kstat+STAT_IRQ,%a0
	addql	#1,%a0@(%d0)

	movel	%sp,%sp@-		/* push regs arg onto stack */
	clrl	%sp@-			/* push devid arg */
	lsrl	#2,%d0			/* calculate real vector # */
	movel	%d0,%sp@-		/* push vector # on stack */

	lsll	#4,%d0			/* adjust for array offset */
	lea	irq_list,%a0
	movel	%a0@(%d0),%a0		/* get function to call */
	jbsr	%a0@			/* call vector handler */
	lea	%sp@(12),%sp		/* pop parameters off stack */

	RESTORE_LOCAL
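
/*
 * Common interrupt return path.  If interrupts are still nested
 * (local_irq_count has not reached zero) just restore and rte; the
 * low three bits of the SR system byte hold the interrupted IPL, so
 * a non-zero value there likewise means another handler was
 * interrupted.  Only when returning to IPL 0 are softirqs run.
 */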
ENTRY(ret_from_interrupt)
	subql	#1,local_irq_count
	jeq	2f
1:
	RESTORE_ALL
2:
	moveb	%sp@(PT_SR),%d0
	andl	#0x7,%d0
	jhi	1b

	/* check if we need to do software interrupts */
	movel	irq_stat+CPUSTAT_SOFTIRQ_PENDING,%d0
	jeq	ret_from_exception
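	/*
	 * Tail-call: push ret_from_exception as a fake return address so
	 * that do_softirq's rts lands back in the normal exit path.
	 */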
	pea	ret_from_exception
	jmp	do_softirq

/*
 * Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
 * registers until their contents are no longer needed.
 */
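/*
 * Roughly, as a C sketch: save %sr and sw_usp into prev->thread, spill
 * the callee-saved registers with SAVE_SWITCH_STACK, swap kernel stack
 * pointers, then reload the same state for next and return on its stack.
 */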
ENTRY(resume)
	movel	%a0, %d1		/* get prev thread in d1 */
	movew	%sr,%d0			/* save thread status reg */
	movew	%d0,%a0@(TASK_THREAD+THREAD_SR)

	oril	#0x700,%d0		/* disable interrupts */
	move	%d0,%sr

	movel	sw_usp,%d0		/* save usp */
	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)

	SAVE_SWITCH_STACK
	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new thread stack */
	RESTORE_SWITCH_STACK

	movel	%a1@(TASK_THREAD+THREAD_USP),%a0 /* restore thread user stack */
	movel	%a0, sw_usp

	movew	%a1@(TASK_THREAD+THREAD_SR),%d0	/* restore thread status reg */
	movew	%d0, %sr
	rts