/* arch/mips/kernel/entry.S */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */
  10. #include <asm/asm.h>
  11. #include <asm/asmmacro.h>
  12. #include <asm/regdef.h>
  13. #include <asm/mipsregs.h>
  14. #include <asm/stackframe.h>
  15. #include <asm/isadep.h>
  16. #include <asm/thread_info.h>
  17. #include <asm/war.h>
  18. #ifdef CONFIG_MIPS_MT_SMTC
  19. #include <asm/mipsmtregs.h>
  20. #endif
  21. #ifndef CONFIG_PREEMPT
  22. #define resume_kernel restore_all
  23. #else
  24. #define __ret_from_irq ret_from_exception
  25. #endif
  26. .text
  27. .align 5
  28. #ifndef CONFIG_PREEMPT
  29. FEXPORT(ret_from_exception)
  30. local_irq_disable # preempt stop
  31. b __ret_from_irq
  32. #endif
  33. FEXPORT(ret_from_irq)
  34. LONG_S s0, TI_REGS($28)
  35. FEXPORT(__ret_from_irq)
  36. /*
  37. * We can be coming here from a syscall done in the kernel space,
  38. * e.g. a failed kernel_execve().
  39. */
  40. resume_userspace_check:
  41. LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
  42. andi t0, t0, KU_USER
  43. beqz t0, resume_kernel
  44. resume_userspace:
  45. local_irq_disable # make sure we dont miss an
  46. # interrupt setting need_resched
  47. # between sampling and return
  48. LONG_L a2, TI_FLAGS($28) # current->work
  49. andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
  50. bnez t0, work_pending
  51. j restore_all
  52. #ifdef CONFIG_PREEMPT
  53. resume_kernel:
  54. local_irq_disable
  55. lw t0, TI_PRE_COUNT($28)
  56. bnez t0, restore_all
  57. need_resched:
  58. LONG_L t0, TI_FLAGS($28)
  59. andi t1, t0, _TIF_NEED_RESCHED
  60. beqz t1, restore_all
  61. LONG_L t0, PT_STATUS(sp) # Interrupts off?
  62. andi t0, 1
  63. beqz t0, restore_all
  64. jal preempt_schedule_irq
  65. b need_resched
  66. #endif
  67. FEXPORT(ret_from_kernel_thread)
  68. jal schedule_tail # a0 = struct task_struct *prev
  69. move a0, s1
  70. jal s0
  71. j syscall_exit
  72. FEXPORT(ret_from_fork)
  73. jal schedule_tail # a0 = struct task_struct *prev
  74. FEXPORT(syscall_exit)
  75. local_irq_disable # make sure need_resched and
  76. # signals dont change between
  77. # sampling and return
  78. LONG_L a2, TI_FLAGS($28) # current->work
  79. li t0, _TIF_ALLWORK_MASK
  80. and t0, a2, t0
  81. bnez t0, syscall_exit_work
  82. restore_all: # restore full frame
  83. #ifdef CONFIG_MIPS_MT_SMTC
  84. #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
  85. /* Re-arm any temporarily masked interrupts not explicitly "acked" */
  86. mfc0 v0, CP0_TCSTATUS
  87. ori v1, v0, TCSTATUS_IXMT
  88. mtc0 v1, CP0_TCSTATUS
  89. andi v0, TCSTATUS_IXMT
  90. _ehb
  91. mfc0 t0, CP0_TCCONTEXT
  92. DMT 9 # dmt t1
  93. jal mips_ihb
  94. mfc0 t2, CP0_STATUS
  95. andi t3, t0, 0xff00
  96. or t2, t2, t3
  97. mtc0 t2, CP0_STATUS
  98. _ehb
  99. andi t1, t1, VPECONTROL_TE
  100. beqz t1, 1f
  101. EMT
  102. 1:
  103. mfc0 v1, CP0_TCSTATUS
  104. /* We set IXMT above, XOR should clear it here */
  105. xori v1, v1, TCSTATUS_IXMT
  106. or v1, v0, v1
  107. mtc0 v1, CP0_TCSTATUS
  108. _ehb
  109. xor t0, t0, t3
  110. mtc0 t0, CP0_TCCONTEXT
  111. #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
  112. /* Detect and execute deferred IPI "interrupts" */
  113. LONG_L s0, TI_REGS($28)
  114. LONG_S sp, TI_REGS($28)
  115. jal deferred_smtc_ipi
  116. LONG_S s0, TI_REGS($28)
  117. #endif /* CONFIG_MIPS_MT_SMTC */
  118. .set noat
  119. RESTORE_TEMP
  120. RESTORE_AT
  121. RESTORE_STATIC
  122. restore_partial: # restore partial frame
  123. #ifdef CONFIG_TRACE_IRQFLAGS
  124. SAVE_STATIC
  125. SAVE_AT
  126. SAVE_TEMP
  127. LONG_L v0, PT_STATUS(sp)
  128. #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
  129. and v0, ST0_IEP
  130. #else
  131. and v0, ST0_IE
  132. #endif
  133. beqz v0, 1f
  134. jal trace_hardirqs_on
  135. b 2f
  136. 1: jal trace_hardirqs_off
  137. 2:
  138. RESTORE_TEMP
  139. RESTORE_AT
  140. RESTORE_STATIC
  141. #endif
  142. RESTORE_SOME
  143. RESTORE_SP_AND_RET
  144. .set at
  145. work_pending:
  146. andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
  147. beqz t0, work_notifysig
  148. work_resched:
  149. jal schedule
  150. local_irq_disable # make sure need_resched and
  151. # signals dont change between
  152. # sampling and return
  153. LONG_L a2, TI_FLAGS($28)
  154. andi t0, a2, _TIF_WORK_MASK # is there any work to be done
  155. # other than syscall tracing?
  156. beqz t0, restore_all
  157. andi t0, a2, _TIF_NEED_RESCHED
  158. bnez t0, work_resched
  159. work_notifysig: # deal with pending signals and
  160. # notify-resume requests
  161. move a0, sp
  162. li a1, 0
  163. jal do_notify_resume # a2 already loaded
  164. j resume_userspace_check
  165. FEXPORT(syscall_exit_partial)
  166. local_irq_disable # make sure need_resched doesn't
  167. # change between and return
  168. LONG_L a2, TI_FLAGS($28) # current->work
  169. li t0, _TIF_ALLWORK_MASK
  170. and t0, a2
  171. beqz t0, restore_partial
  172. SAVE_STATIC
  173. syscall_exit_work:
  174. LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
  175. andi t0, t0, KU_USER
  176. beqz t0, resume_kernel
  177. li t0, _TIF_WORK_SYSCALL_EXIT
  178. and t0, a2 # a2 is preloaded with TI_FLAGS
  179. beqz t0, work_pending # trace bit set?
  180. local_irq_enable # could let syscall_trace_leave()
  181. # call schedule() instead
  182. move a0, sp
  183. jal syscall_trace_leave
  184. b resume_userspace
  185. #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
  186. /*
  187. * MIPS32R2 Instruction Hazard Barrier - must be called
  188. *
  189. * For C code use the inline version named instruction_hazard().
  190. */
  191. LEAF(mips_ihb)
  192. .set mips32r2
  193. jr.hb ra
  194. nop
  195. END(mips_ihb)
  196. #endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */