/* $Id: rtrap.S,v 1.61 2002/02/09 19:49:31 davem Exp $
 * rtrap.S: Preparing for return from trap on Sparc V9.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/config.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>
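
/* The three %pstate images used while returning: RTRAP_PSTATE has
 * PSTATE_IE set (interrupts enabled), RTRAP_PSTATE_IRQOFF is the same
 * with interrupts disabled, and RTRAP_PSTATE_AG_IRQOFF disables
 * interrupts and selects the alternate global register set.
 */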
#define		RTRAP_PSTATE		(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define		RTRAP_PSTATE_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV)
#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_RMO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)

	/* Register %l6 keeps track of whether we are returning
	 * from a system call or not.  It is cleared if we call
	 * do_notify_resume, and it must not be otherwise modified
	 * until we fully commit to returning to userspace.
	 */

	.text
	.align	32
__handle_softirq:
	call	do_softirq
	nop
	ba,a,pt	%xcc, __handle_softirq_continue
	nop
__handle_preemption:
	call	schedule
	wrpr	%g0, RTRAP_PSTATE, %pstate
	ba,pt	%xcc, __handle_preemption_continue
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
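
/* User register windows are still stashed in the thread (TI_WSAVED != 0):
 * fault them out to the user stack with IRQs enabled, then redo the
 * resched and signal checks with IRQs off before committing to return.
 */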
__handle_user_windows:
	call	fault_in_user_windows
	wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate

	/* Redo sched+sig checks */
	ldx	[%g6 + TI_FLAGS], %l0
	andcc	%l0, _TIF_NEED_RESCHED, %g0
	be,pt	%xcc, 1f
	nop
	call	schedule
	wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	ldx	[%g6 + TI_FLAGS], %l0

1:	andcc	%l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
	be,pt	%xcc, __handle_user_windows_continue
	nop
	mov	%l5, %o1
	mov	%l6, %o2
	add	%sp, PTREGS_OFF, %o0
	mov	%l0, %o3
	call	do_notify_resume
	wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	clr	%l6

	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	ba,pt	%xcc, __handle_user_windows_continue
	andn	%l1, %l4, %l1
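
/* Performance counter work is pending (_TIF_PERFCTR): update the
 * counters with IRQs enabled, then redo the user-window, resched and
 * signal checks with IRQs off before returning.
 */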
__handle_perfctrs:
	call	update_perfctrs
	wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	ldub	[%g6 + TI_WSAVED], %o2
	brz,pt	%o2, 1f
	nop

	/* Redo userwin+sched+sig checks */
	call	fault_in_user_windows
	wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	ldx	[%g6 + TI_FLAGS], %l0
	andcc	%l0, _TIF_NEED_RESCHED, %g0
	be,pt	%xcc, 1f
	nop
	call	schedule
	wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	ldx	[%g6 + TI_FLAGS], %l0

1:	andcc	%l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
	be,pt	%xcc, __handle_perfctrs_continue
	sethi	%hi(TSTATE_PEF), %o0
	mov	%l5, %o1
	mov	%l6, %o2
	add	%sp, PTREGS_OFF, %o0
	mov	%l0, %o3
	call	do_notify_resume
	wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	clr	%l6

	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	andn	%l1, %l4, %l1
	ba,pt	%xcc, __handle_perfctrs_continue
	sethi	%hi(TSTATE_PEF), %o0
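
/* The %tstate we are restoring has TSTATE_PEF set.  If the live %fprs
 * no longer has FPRS_FEF set, clear PEF in the restored %tstate so the
 * next user FPU access traps and the FPU state can be re-validated.
 */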
__handle_userfpu:
	rd	%fprs, %l5
	andcc	%l5, FPRS_FEF, %g0
	sethi	%hi(TSTATE_PEF), %o0
	be,a,pn	%icc, __handle_userfpu_continue
	andn	%l1, %o0, %l1
	ba,a,pt	%xcc, __handle_userfpu_continue
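
/* A signal is pending (or a saved sigmask needs restoring): run
 * do_notify_resume with IRQs enabled, then reload the pt_regs %tstate
 * since setting up the signal frame may have changed it.
 */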
__handle_signal:
	mov	%l5, %o1
	mov	%l6, %o2
	add	%sp, PTREGS_OFF, %o0
	mov	%l0, %o3
	call	do_notify_resume
	wrpr	%g0, RTRAP_PSTATE, %pstate
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	clr	%l6

	/* Signal delivery can modify pt_regs tstate, so we must
	 * reload it.
	 */
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
	sethi	%hi(0xf << 20), %l4
	and	%l1, %l4, %l4
	ba,pt	%xcc, __handle_signal_continue
	andn	%l1, %l4, %l1
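
/* Main entry points.  rtrap_irq and rtrap_clr_l6 clear %l6 (we are not
 * returning from a system call) before falling into rtrap proper.
 */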
	.align	64
	.globl	rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap_clr_l6:	clr	%l6
rtrap:
#ifndef CONFIG_SMP
	sethi	%hi(per_cpu____cpu_data), %l0
	lduw	[%l0 + %lo(per_cpu____cpu_data)], %l1
#else
	sethi	%hi(per_cpu____cpu_data), %l0
	or	%l0, %lo(per_cpu____cpu_data), %l0
	lduw	[%l0 + %g5], %l1
#endif
	cmp	%l1, 0

	/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
	bne,pn	%icc, __handle_softirq
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
	sethi	%hi(0xf << 20), %l4
	andcc	%l1, TSTATE_PRIV, %l3
	and	%l1, %l4, %l4
	bne,pn	%icc, to_kernel
	andn	%l1, %l4, %l1

	/* We must hold IRQs off and atomically test schedule+signal
	 * state, then hold them off all the way back to userspace.
	 * If we are returning to kernel, none of this matters.
	 *
	 * If we do not do this, there is a window where we would do
	 * the tests, later the signal/resched event arrives but we do
	 * not process it since we are still in kernel mode.  It would
	 * take until the next local IRQ before the signal/resched
	 * event would be handled.
	 *
	 * This also means that if we have to deal with performance
	 * counters or user windows, we have to redo all of these
	 * sched+signal checks with IRQs disabled.
	 */
to_user:	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	wrpr	0, %pil
__handle_preemption_continue:
	ldx	[%g6 + TI_FLAGS], %l0
	sethi	%hi(_TIF_USER_WORK_MASK), %o0
	or	%o0, %lo(_TIF_USER_WORK_MASK), %o0
	andcc	%l0, %o0, %g0
	sethi	%hi(TSTATE_PEF), %o0
	be,pt	%xcc, user_nowork
	andcc	%l1, %o0, %g0
	andcc	%l0, _TIF_NEED_RESCHED, %g0
	bne,pn	%xcc, __handle_preemption
	andcc	%l0, (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), %g0
	bne,pn	%xcc, __handle_signal
__handle_signal_continue:
	ldub	[%g6 + TI_WSAVED], %o2
	brnz,pn	%o2, __handle_user_windows
	nop
__handle_user_windows_continue:
	ldx	[%g6 + TI_FLAGS], %l5
	andcc	%l5, _TIF_PERFCTR, %g0
	sethi	%hi(TSTATE_PEF), %o0
	bne,pn	%xcc, __handle_perfctrs
__handle_perfctrs_continue:
	andcc	%l1, %o0, %g0

	/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
	bne,pn	%xcc, __handle_userfpu
	stb	%g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:
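
/* All pending user work is done; reload the register state saved in
 * pt_regs and head for the final trap-register restore.
 */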
rt_continue:	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
	ldx	[%sp + PTREGS_OFF + PT_V9_G2], %g2
	ldx	[%sp + PTREGS_OFF + PT_V9_G3], %g3
	ldx	[%sp + PTREGS_OFF + PT_V9_G4], %g4
	ldx	[%sp + PTREGS_OFF + PT_V9_G5], %g5
	brz,pt	%l3, 1f
	mov	%g6, %l2

	/* Must do this before thread reg is clobbered below. */
	LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
	ldx	[%sp + PTREGS_OFF + PT_V9_G6], %g6
	ldx	[%sp + PTREGS_OFF + PT_V9_G7], %g7

	/* Normal globals are restored, go to trap globals. */
661:	wrpr	%g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
	nop
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
	SET_GL(1)
	.previous

	mov	%l2, %g6

	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
	ldx	[%sp + PTREGS_OFF + PT_V9_I6], %i6
	ldx	[%sp + PTREGS_OFF + PT_V9_I7], %i7
	ldx	[%sp + PTREGS_OFF + PT_V9_TPC], %l2
	ldx	[%sp + PTREGS_OFF + PT_V9_TNPC], %o2

	ld	[%sp + PTREGS_OFF + PT_V9_Y], %o3
	wr	%o3, %g0, %y
	srl	%l4, 20, %l4
	wrpr	%l4, 0x0, %pil
	wrpr	%g0, 0x1, %tl
	wrpr	%l1, %g0, %tstate
	wrpr	%l2, %g0, %tpc
	wrpr	%o2, %g0, %tnpc
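
/* A kernel return branches to kern_rtt below.  For a user return the
 * user's MMU context is read back out of SECONDARY_CONTEXT (note that
 * %l7 + %l7 == SECONDARY_CONTEXT), the kernel nucleus page-size bits
 * are ORed in, and the result is reinstalled as PRIMARY_CONTEXT.
 */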
	brnz,pn	%l3, kern_rtt
	mov	PRIMARY_CONTEXT, %l7

661:	ldxa	[%l7 + %l7] ASI_DMMU, %l0
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	ldxa	[%l7 + %l7] ASI_MMU, %l0
	.previous

	sethi	%hi(sparc64_kern_pri_nuc_bits), %l1
	ldx	[%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
	or	%l0, %l1, %l0

661:	stxa	%l0, [%l7] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%l0, [%l7] ASI_MMU
	.previous

	sethi	%hi(KERNBASE), %l7
	flush	%l7
	rdpr	%wstate, %l1
	rdpr	%otherwin, %l2
	srl	%l1, 3, %l1
	wrpr	%l2, %g0, %canrestore
	wrpr	%l1, %g0, %wstate
	brnz,pt	%l2, user_rtt_restore
	wrpr	%g0, %g0, %otherwin

	ldx	[%g6 + TI_FLAGS], %g3
	wr	%g0, ASI_AIUP, %asi
	rdpr	%cwp, %g1
	andcc	%g3, _TIF_32BIT, %g0
	sub	%g1, 1, %g1
	bne,pt	%xcc, user_rtt_fill_32bit
	wrpr	%g1, %cwp
	ba,a,pt	%xcc, user_rtt_fill_64bit
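
/* If the register-window fill from the user stack faults, the fill
 * trap path lands here: roll %cwp and %wstate back, switch to the
 * kernel's primary context, record a WINFIXUP fault, drop to trap
 * level zero, run do_sparc64_fault, and then restart rtrap.
 */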
user_rtt_fill_fixup:
	rdpr	%cwp, %g1
	add	%g1, 1, %g1
	wrpr	%g1, 0x0, %cwp

	rdpr	%wstate, %g2
	sll	%g2, 3, %g2
	wrpr	%g2, 0x0, %wstate

	/* We know %canrestore and %otherwin are both zero. */

	sethi	%hi(sparc64_kern_pri_context), %g2
	ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
	mov	PRIMARY_CONTEXT, %g1

661:	stxa	%g2, [%g1] ASI_DMMU
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	stxa	%g2, [%g1] ASI_MMU
	.previous

	sethi	%hi(KERNBASE), %g1
	flush	%g1

	or	%g4, FAULT_CODE_WINFIXUP, %g4
	stb	%g4, [%g6 + TI_FAULT_CODE]
	stx	%g5, [%g6 + TI_FAULT_ADDR]

	mov	%g6, %l1
	wrpr	%g0, 0x0, %tl

661:	nop
	.section	.sun4v_1insn_patch, "ax"
	.word	661b
	SET_GL(0)
	.previous

	wrpr	%g0, RTRAP_PSTATE, %pstate

	mov	%l1, %g6
	ldx	[%g6 + TI_TASK], %g4
	LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
	call	do_sparc64_fault
	add	%sp, PTREGS_OFF, %o0
	ba,pt	%xcc, rtrap
	nop
user_rtt_pre_restore:
	add	%g1, 1, %g1
	wrpr	%g1, 0x0, %cwp

user_rtt_restore:
	restore
	rdpr	%canrestore, %g1
	wrpr	%g1, 0x0, %cleanwin
	retry
	nop

kern_rtt:	rdpr	%canrestore, %g1
	brz,pn	%g1, kern_rtt_fill
	nop
kern_rtt_restore:
	restore
	retry
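
/* Return-to-kernel path.  With CONFIG_PREEMPT we preempt here if the
 * kernel was interrupted with preemption enabled, need_resched is set
 * and the saved %pil is zero; otherwise fall through to the FPU depth
 * check.
 */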
to_kernel:
#ifdef CONFIG_PREEMPT
	ldsw	[%g6 + TI_PRE_COUNT], %l5
	brnz	%l5, kern_fpucheck
	ldx	[%g6 + TI_FLAGS], %l5
	andcc	%l5, _TIF_NEED_RESCHED, %g0
	be,pt	%xcc, kern_fpucheck
	srl	%l4, 20, %l5
	cmp	%l5, 0
	bne,pn	%xcc, kern_fpucheck
	sethi	%hi(PREEMPT_ACTIVE), %l6
	stw	%l6, [%g6 + TI_PRE_COUNT]
	call	schedule
	nop
	ba,pt	%xcc, rtrap
	stw	%g0, [%g6 + TI_PRE_COUNT]
#endif
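
/* Restore FPU state saved for a nested kernel trap: TI_FPDEPTH tracks
 * the nesting and the %fprs value recorded at trap entry says (via
 * FPRS_DL/FPRS_DU) which halves of the FP register file, plus %gsr and
 * %fsr, need to be reloaded before continuing.
 */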
kern_fpucheck:	ldub	[%g6 + TI_FPDEPTH], %l5
	brz,pt	%l5, rt_continue
	srl	%l5, 1, %o0
	add	%g6, TI_FPSAVED, %l6
	ldub	[%l6 + %o0], %l2
	sub	%l5, 2, %l5

	add	%g6, TI_GSR, %o1
	andcc	%l2, (FPRS_FEF|FPRS_DU), %g0
	be,pt	%icc, 2f
	and	%l2, FPRS_DL, %l6
	andcc	%l2, FPRS_FEF, %g0
	be,pn	%icc, 5f
	sll	%o0, 3, %o5
	rd	%fprs, %g1

	wr	%g1, FPRS_FEF, %fprs
	ldx	[%o1 + %o5], %g1
	add	%g6, TI_XFSR, %o1
	sll	%o0, 8, %o2
	add	%g6, TI_FPREGS, %o3
	brz,pn	%l6, 1f
	add	%g6, TI_FPREGS+0x40, %o4

	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f0
	ldda	[%o4 + %o2] ASI_BLK_P, %f16
	membar	#Sync
1:	andcc	%l2, FPRS_DU, %g0
	be,pn	%icc, 1f
	wr	%g1, 0, %gsr
	add	%o2, 0x80, %o2
	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f32
	ldda	[%o4 + %o2] ASI_BLK_P, %f48
1:	membar	#Sync
	ldx	[%o1 + %o5], %fsr
2:	stb	%l5, [%g6 + TI_FPDEPTH]
	ba,pt	%xcc, rt_continue
	nop

5:	wr	%g0, FPRS_FEF, %fprs
	sll	%o0, 8, %o2

	add	%g6, TI_FPREGS+0x80, %o3
	add	%g6, TI_FPREGS+0xc0, %o4
	membar	#Sync
	ldda	[%o3 + %o2] ASI_BLK_P, %f32
	ldda	[%o4 + %o2] ASI_BLK_P, %f48
	membar	#Sync
	wr	%g0, FPRS_DU, %fprs
	ba,pt	%xcc, rt_continue
	stb	%l5, [%g6 + TI_FPDEPTH]