entry.S

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 */

#include <linux/config.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/isadep.h>
#include <asm/thread_info.h>
#include <asm/war.h>
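
/*
 * With CONFIG_PREEMPT the preempt_stop macro expands to nothing and the
 * return-to-kernel path falls through to resume_kernel below.  Without
 * preemption it simply disables interrupts, and resume_kernel is an alias
 * for restore_all.
 */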
#ifdef CONFIG_PREEMPT
	.macro	preempt_stop reg=t0
	.endm
#else
	.macro	preempt_stop reg=t0
	local_irq_disable	\reg
	.endm
#define resume_kernel	restore_all
#endif

	.text
	.align	5
FEXPORT(ret_from_exception)
	preempt_stop
FEXPORT(ret_from_irq)
	LONG_L	t0, PT_STATUS(sp)	# returning to kernel mode?
	andi	t0, t0, KU_USER
	beqz	t0, resume_kernel

FEXPORT(resume_userspace)
	local_irq_disable	t0	# make sure we don't miss an
					# interrupt setting need_resched
					# between sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	andi	a2, _TIF_WORK_MASK	# (ignoring syscall_trace)
	bnez	a2, work_pending
	j	restore_all
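
/*
 * Kernel preemption: when returning to kernel mode with CONFIG_PREEMPT we
 * may call schedule() only if the preempt count is zero, TIF_NEED_RESCHED
 * is set, and the interrupted context had interrupts enabled (IE bit of the
 * saved Status register).  PREEMPT_ACTIVE is set around the call so the
 * scheduler knows the task is being preempted.
 */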

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	lw	t0, TI_PRE_COUNT($28)
	bnez	t0, restore_all
need_resched:
	LONG_L	t0, TI_FLAGS($28)
	andi	t1, t0, _TIF_NEED_RESCHED
	beqz	t1, restore_all
	LONG_L	t0, PT_STATUS(sp)	# Interrupts off?
	andi	t0, 1
	beqz	t0, restore_all
	li	t0, PREEMPT_ACTIVE
	sw	t0, TI_PRE_COUNT($28)
	local_irq_enable t0
	jal	schedule
	sw	zero, TI_PRE_COUNT($28)
	local_irq_disable t0
	b	need_resched
#endif
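
/*
 * Newly created tasks start executing here: schedule_tail() finishes the
 * context switch begun in schedule() (a0 holds the previous task), then we
 * fall through into the normal syscall exit path.
 */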

FEXPORT(ret_from_fork)
	jal	schedule_tail		# a0 = task_t *prev

FEXPORT(syscall_exit)
	local_irq_disable		# make sure need_resched and
					# signals don't change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)	# current->work
	li	t0, _TIF_ALLWORK_MASK
	and	t0, a2, t0
	bnez	t0, syscall_exit_work

FEXPORT(restore_all)			# restore full frame
	.set	noat
	RESTORE_TEMP
	RESTORE_AT
	RESTORE_STATIC
FEXPORT(restore_partial)		# restore partial frame
	RESTORE_SOME
	RESTORE_SP_AND_RET
	.set	at
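
/*
 * Slow path for returning to user space: reschedule while TIF_NEED_RESCHED
 * is set, otherwise handle pending signals and notify-resume requests via
 * do_notify_resume().  TI_FLAGS is re-read with interrupts disabled after
 * each call to schedule() so a newly set work bit cannot be missed.
 */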

FEXPORT(work_pending)
	andi	t0, a2, _TIF_NEED_RESCHED
	beqz	t0, work_notifysig
work_resched:
	jal	schedule

	local_irq_disable t0		# make sure need_resched and
					# signals don't change between
					# sampling and return
	LONG_L	a2, TI_FLAGS($28)
	andi	t0, a2, _TIF_WORK_MASK	# is there any work to be done
					# other than syscall tracing?
	beqz	t0, restore_all
	andi	t0, a2, _TIF_NEED_RESCHED
	bnez	t0, work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	move	a0, sp
	li	a1, 0
	jal	do_notify_resume	# a2 already loaded
	j	restore_all
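
/*
 * Syscall exit with work pending: if the task is being traced (ptrace) or
 * audited, call do_syscall_trace() for the syscall-exit hook with
 * interrupts enabled; otherwise the remaining work bits are handled by
 * work_pending above.
 */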

FEXPORT(syscall_exit_work_partial)
	SAVE_STATIC
FEXPORT(syscall_exit_work)
	LONG_L	t0, TI_FLAGS($28)
	li	t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
	and	t0, t1
	beqz	t0, work_pending	# not being traced or audited
	local_irq_enable		# could let do_syscall_trace()
					# call schedule() instead
	move	a0, sp
	li	a1, 1
	jal	do_syscall_trace
	b	resume_userspace

/*
 * Common spurious interrupt handler.
 */
	.text
	.align	5
LEAF(spurious_interrupt)
	/*
	 * Someone tried to fool us by sending an interrupt but we
	 * couldn't find a cause for it.
	 */
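	/*
	 * On SMP the counter is bumped with an ll/sc retry loop so the
	 * increment is atomic; sc leaves 0 in t0 when it loses the
	 * reservation, so beqz retries.  R10000_LLSC_WAR selects the
	 * branch-likely form (beqzl) to work around an ll/sc erratum on
	 * early R10000 parts.  Uniprocessor builds use a plain lw/sw.
	 */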
#ifdef CONFIG_SMP
	lui	t1, %hi(irq_err_count)
1:	ll	t0, %lo(irq_err_count)(t1)
	addiu	t0, 1
	sc	t0, %lo(irq_err_count)(t1)
#if R10000_LLSC_WAR
	beqzl	t0, 1b
#else
	beqz	t0, 1b
#endif
#else
	lui	t1, %hi(irq_err_count)
	lw	t0, %lo(irq_err_count)(t1)
	addiu	t0, 1
	sw	t0, %lo(irq_err_count)(t1)
#endif
	j	ret_from_irq
	END(spurious_interrupt)