# break.S — FRV break-interrupt handling (listing-page header and
# line-number gutter from the original web capture removed)
  1. /* break.S: Break interrupt handling (kept separate from entry.S)
  2. *
  3. * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/sys.h>
  12. #include <linux/config.h>
  13. #include <linux/linkage.h>
  14. #include <asm/setup.h>
  15. #include <asm/segment.h>
  16. #include <asm/ptrace.h>
  17. #include <asm/spr-regs.h>
  18. #include <asm/errno.h>
  19. #
  20. # the break handler has its own stack
  21. #
  22. .section .bss.stack
  23. .globl __break_user_context
  24. .balign 8192
  25. __break_stack:
  26. .space (8192 - (USER_CONTEXT_SIZE + REG__DEBUG_XTRA)) & ~7
  27. __break_stack_tos:
  28. .space REG__DEBUG_XTRA
  29. __break_user_context:
  30. .space USER_CONTEXT_SIZE
  31. #
  32. # miscellaneous variables
  33. #
  34. .section .bss
  35. #ifdef CONFIG_MMU
  36. .globl __break_tlb_miss_real_return_info
  37. __break_tlb_miss_real_return_info:
  38. .balign 8
  39. .space 2*4 /* saved PCSR, PSR for TLB-miss handler fixup */
  40. #endif
  41. __break_trace_through_exceptions:
  42. .space 4
  43. #define CS2_ECS1 0xe1200000
  44. #define CS2_USERLED 0x4
  45. .macro LEDS val,reg
  46. # sethi.p %hi(CS2_ECS1+CS2_USERLED),gr30
  47. # setlo %lo(CS2_ECS1+CS2_USERLED),gr30
  48. # setlos #~\val,\reg
  49. # st \reg,@(gr30,gr0)
  50. # setlos #0x5555,\reg
  51. # sethi.p %hi(0xffc00100),gr30
  52. # setlo %lo(0xffc00100),gr30
  53. # sth \reg,@(gr30,gr0)
  54. # membar
  55. .endm
  56. ###############################################################################
  57. #
  58. # entry point for Break Exceptions/Interrupts
  59. #
  60. ###############################################################################
  61. .text
  62. .balign 4
  63. .globl __entry_break
  64. __entry_break:
  65. #ifdef CONFIG_MMU
  66. movgs gr31,scr3
  67. #endif
  68. LEDS 0x1001,gr31
  69. sethi.p %hi(__break_user_context),gr31
  70. setlo %lo(__break_user_context),gr31
  71. stdi gr2,@(gr31,#REG_GR(2))
  72. movsg ccr,gr3
  73. sti gr3,@(gr31,#REG_CCR)
  74. # catch the return from a TLB-miss handler that had single-step disabled
  75. # traps will be enabled, so we have to do this now
  76. #ifdef CONFIG_MMU
  77. movsg bpcsr,gr3
  78. sethi.p %hi(__break_tlb_miss_return_breaks_here),gr2
  79. setlo %lo(__break_tlb_miss_return_breaks_here),gr2
  80. subcc gr2,gr3,gr0,icc0
  81. beq icc0,#2,__break_return_singlestep_tlbmiss
  82. #endif
  83. # determine whether we have stepped through into an exception
  84. # - we need to take special action to suspend h/w single stepping if we've done
  85. # that, so that the gdbstub doesn't get bogged down endlessly stepping through
  86. # external interrupt handling
  87. movsg bpsr,gr3
  88. andicc gr3,#BPSR_BET,gr0,icc0
  89. bne icc0,#2,__break_maybe_userspace /* jump if PSR.ET was 1 */
  90. LEDS 0x1003,gr2
  91. movsg brr,gr3
  92. andicc gr3,#BRR_ST,gr0,icc0
  93. andicc.p gr3,#BRR_SB,gr0,icc1
  94. bne icc0,#2,__break_step /* jump if single-step caused break */
  95. beq icc1,#2,__break_continue /* jump if BREAK didn't cause break */
  96. LEDS 0x1007,gr2
  97. # handle special breaks
  98. movsg bpcsr,gr3
  99. sethi.p %hi(__entry_return_singlestep_breaks_here),gr2
  100. setlo %lo(__entry_return_singlestep_breaks_here),gr2
  101. subcc gr2,gr3,gr0,icc0
  102. beq icc0,#2,__break_return_singlestep
  103. bra __break_continue
  104. ###############################################################################
  105. #
  106. # handle BREAK instruction in kernel-mode exception epilogue
  107. #
  108. ###############################################################################
  109. __break_return_singlestep:
  110. LEDS 0x100f,gr2
  111. # special break insn requests single-stepping to be turned back on
  112. # HERE RETT
  113. # PSR.ET 0 0
  114. # PSR.PS old PSR.S ?
  115. # PSR.S 1 1
  116. # BPSR.ET 0 1 (can't have caused orig excep otherwise)
  117. # BPSR.BS 1 old PSR.S
  118. movsg dcr,gr2
  119. sethi.p %hi(DCR_SE),gr3
  120. setlo %lo(DCR_SE),gr3
  121. or gr2,gr3,gr2
  122. movgs gr2,dcr
  123. movsg psr,gr2
  124. andi gr2,#PSR_PS,gr2
  125. slli gr2,#11,gr2 /* PSR.PS -> BPSR.BS */
  126. ori gr2,#BPSR_BET,gr2 /* 1 -> BPSR.BET */
  127. movgs gr2,bpsr
  128. # return to the invoker of the original kernel exception
  129. movsg pcsr,gr2
  130. movgs gr2,bpcsr
  131. LEDS 0x101f,gr2
  132. ldi @(gr31,#REG_CCR),gr3
  133. movgs gr3,ccr
  134. lddi.p @(gr31,#REG_GR(2)),gr2
  135. xor gr31,gr31,gr31
  136. movgs gr0,brr
  137. #ifdef CONFIG_MMU
  138. movsg scr3,gr31
  139. #endif
  140. rett #1
  141. ###############################################################################
  142. #
  143. # handle BREAK instruction in TLB-miss handler return path
  144. #
  145. ###############################################################################
  146. #ifdef CONFIG_MMU
  147. __break_return_singlestep_tlbmiss:
  148. LEDS 0x1100,gr2
  149. sethi.p %hi(__break_tlb_miss_real_return_info),gr3
  150. setlo %lo(__break_tlb_miss_real_return_info),gr3
  151. lddi @(gr3,#0),gr2
  152. movgs gr2,pcsr
  153. movgs gr3,psr
  154. bra __break_return_singlestep
  155. #endif
  156. ###############################################################################
  157. #
  158. # handle single stepping into an exception prologue from kernel mode
  159. # - we try and catch it whilst it is still in the main vector table
  160. # - if we catch it there, we have to jump to the fixup handler
  161. # - there is a fixup table that has a pointer for every 16b slot in the trap
  162. # table
  163. #
  164. ###############################################################################
  165. __break_step:
  166. LEDS 0x2003,gr2
  167. # external interrupts seem to escape from the trap table before single
  168. # step catches up with them
  169. movsg bpcsr,gr2
  170. sethi.p %hi(__entry_kernel_external_interrupt),gr3
  171. setlo %lo(__entry_kernel_external_interrupt),gr3
  172. subcc.p gr2,gr3,gr0,icc0
  173. sethi %hi(__entry_uspace_external_interrupt),gr3
  174. setlo.p %lo(__entry_uspace_external_interrupt),gr3
  175. beq icc0,#2,__break_step_kernel_external_interrupt
  176. subcc.p gr2,gr3,gr0,icc0
  177. sethi %hi(__entry_kernel_external_interrupt_virtually_disabled),gr3
  178. setlo.p %lo(__entry_kernel_external_interrupt_virtually_disabled),gr3
  179. beq icc0,#2,__break_step_uspace_external_interrupt
  180. subcc.p gr2,gr3,gr0,icc0
  181. sethi %hi(__entry_kernel_external_interrupt_virtual_reenable),gr3
  182. setlo.p %lo(__entry_kernel_external_interrupt_virtual_reenable),gr3
  183. beq icc0,#2,__break_step_kernel_external_interrupt_virtually_disabled
  184. subcc gr2,gr3,gr0,icc0
  185. beq icc0,#2,__break_step_kernel_external_interrupt_virtual_reenable
  186. LEDS 0x2007,gr2
  187. # the two main vector tables are adjacent on one 8Kb slab
  188. movsg bpcsr,gr2
  189. setlos #0xffffe000,gr3
  190. and gr2,gr3,gr2
  191. sethi.p %hi(__trap_tables),gr3
  192. setlo %lo(__trap_tables),gr3
  193. subcc gr2,gr3,gr0,icc0
  194. bne icc0,#2,__break_continue
  195. LEDS 0x200f,gr2
  196. # skip workaround if so requested by GDB
  197. sethi.p %hi(__break_trace_through_exceptions),gr3
  198. setlo %lo(__break_trace_through_exceptions),gr3
  199. ld @(gr3,gr0),gr3
  200. subcc gr3,gr0,gr0,icc0
  201. bne icc0,#0,__break_continue
  202. LEDS 0x201f,gr2
  203. # access the fixup table - there's a 1:1 mapping between the slots in the trap tables and
  204. # the slots in the trap fixup tables allowing us to simply divide the offset into the
  205. # former by 4 to access the latter
  206. sethi.p %hi(__trap_tables),gr3
  207. setlo %lo(__trap_tables),gr3
  208. movsg bpcsr,gr2
  209. sub gr2,gr3,gr2
  210. srli.p gr2,#2,gr2
  211. sethi %hi(__trap_fixup_tables),gr3
  212. setlo.p %lo(__trap_fixup_tables),gr3
  213. andi gr2,#~3,gr2
  214. ld @(gr2,gr3),gr2
  215. jmpil @(gr2,#0)
  216. # step through an internal exception from kernel mode
  217. .globl __break_step_kernel_softprog_interrupt
  218. __break_step_kernel_softprog_interrupt:
  219. sethi.p %hi(__entry_kernel_softprog_interrupt_reentry),gr3
  220. setlo %lo(__entry_kernel_softprog_interrupt_reentry),gr3
  221. bra __break_return_as_kernel_prologue
  222. # step through an external interrupt from kernel mode
  223. .globl __break_step_kernel_external_interrupt
  224. __break_step_kernel_external_interrupt:
  225. # deal with virtual interrupt disablement
  226. beq icc2,#0,__break_step_kernel_external_interrupt_virtually_disabled
  227. sethi.p %hi(__entry_kernel_external_interrupt_reentry),gr3
  228. setlo %lo(__entry_kernel_external_interrupt_reentry),gr3
  229. __break_return_as_kernel_prologue:
  230. LEDS 0x203f,gr2
  231. movgs gr3,bpcsr
  232. # do the bit we had to skip
  233. #ifdef CONFIG_MMU
  234. movsg ear0,gr2 /* EAR0 can get clobbered by gdb-stub (ICI/ICEI) */
  235. movgs gr2,scr2
  236. #endif
  237. or.p sp,gr0,gr2 /* set up the stack pointer */
  238. subi sp,#REG__END,sp
  239. sti.p gr2,@(sp,#REG_SP)
  240. setlos #REG__STATUS_STEP,gr2
  241. sti gr2,@(sp,#REG__STATUS) /* record single step status */
  242. # cancel single-stepping mode
  243. movsg dcr,gr2
  244. sethi.p %hi(~DCR_SE),gr3
  245. setlo %lo(~DCR_SE),gr3
  246. and gr2,gr3,gr2
  247. movgs gr2,dcr
  248. LEDS 0x207f,gr2
  249. ldi @(gr31,#REG_CCR),gr3
  250. movgs gr3,ccr
  251. lddi.p @(gr31,#REG_GR(2)),gr2
  252. xor gr31,gr31,gr31
  253. movgs gr0,brr
  254. #ifdef CONFIG_MMU
  255. movsg scr3,gr31
  256. #endif
  257. rett #1
  258. # we single-stepped into an interrupt handler whilst interrupts were merely virtually disabled
  259. # need to really disable interrupts, set flag, fix up and return
  260. __break_step_kernel_external_interrupt_virtually_disabled:
  261. movsg psr,gr2
  262. andi gr2,#~PSR_PIL,gr2
  263. ori gr2,#PSR_PIL_14,gr2 /* debugging interrupts only */
  264. movgs gr2,psr
  265. ldi @(gr31,#REG_CCR),gr3
  266. movgs gr3,ccr
  267. subcc.p gr0,gr0,gr0,icc2 /* leave Z set, clear C */
  268. # exceptions must've been enabled and we must've been in supervisor mode
  269. setlos BPSR_BET|BPSR_BS,gr3
  270. movgs gr3,bpsr
  271. # return to where the interrupt happened
  272. movsg pcsr,gr2
  273. movgs gr2,bpcsr
  274. lddi.p @(gr31,#REG_GR(2)),gr2
  275. xor gr31,gr31,gr31
  276. movgs gr0,brr
  277. #ifdef CONFIG_MMU
  278. movsg scr3,gr31
  279. #endif
  280. rett #1
  281. # we stepped through into the virtual interrupt reenablement trap
  282. #
  283. # we also want to single step anyway, but after fixing up so that we get an event on the
  284. # instruction after the broken-into exception returns
  285. .globl __break_step_kernel_external_interrupt_virtual_reenable
  286. __break_step_kernel_external_interrupt_virtual_reenable:
  287. movsg psr,gr2
  288. andi gr2,#~PSR_PIL,gr2
  289. movgs gr2,psr
  290. ldi @(gr31,#REG_CCR),gr3
  291. movgs gr3,ccr
  292. subicc gr0,#1,gr0,icc2 /* clear Z, set C */
  293. # save the adjusted ICC2
  294. movsg ccr,gr3
  295. sti gr3,@(gr31,#REG_CCR)
  296. # exceptions must've been enabled and we must've been in supervisor mode
  297. setlos BPSR_BET|BPSR_BS,gr3
  298. movgs gr3,bpsr
  299. # return to where the trap happened
  300. movsg pcsr,gr2
  301. movgs gr2,bpcsr
  302. # and then process the single step
  303. bra __break_continue
  304. # step through an internal exception from uspace mode
  305. .globl __break_step_uspace_softprog_interrupt
  306. __break_step_uspace_softprog_interrupt:
  307. sethi.p %hi(__entry_uspace_softprog_interrupt_reentry),gr3
  308. setlo %lo(__entry_uspace_softprog_interrupt_reentry),gr3
  309. bra __break_return_as_uspace_prologue
  310. # step through an external interrupt from kernel mode
  311. .globl __break_step_uspace_external_interrupt
  312. __break_step_uspace_external_interrupt:
  313. sethi.p %hi(__entry_uspace_external_interrupt_reentry),gr3
  314. setlo %lo(__entry_uspace_external_interrupt_reentry),gr3
  315. __break_return_as_uspace_prologue:
  316. LEDS 0x20ff,gr2
  317. movgs gr3,bpcsr
  318. # do the bit we had to skip
  319. sethi.p %hi(__kernel_frame0_ptr),gr28
  320. setlo %lo(__kernel_frame0_ptr),gr28
  321. ldi.p @(gr28,#0),gr28
  322. setlos #REG__STATUS_STEP,gr2
  323. sti gr2,@(gr28,#REG__STATUS) /* record single step status */
  324. # cancel single-stepping mode
  325. movsg dcr,gr2
  326. sethi.p %hi(~DCR_SE),gr3
  327. setlo %lo(~DCR_SE),gr3
  328. and gr2,gr3,gr2
  329. movgs gr2,dcr
  330. LEDS 0x20fe,gr2
  331. ldi @(gr31,#REG_CCR),gr3
  332. movgs gr3,ccr
  333. lddi.p @(gr31,#REG_GR(2)),gr2
  334. xor gr31,gr31,gr31
  335. movgs gr0,brr
  336. #ifdef CONFIG_MMU
  337. movsg scr3,gr31
  338. #endif
  339. rett #1
  340. #ifdef CONFIG_MMU
  341. # step through an ITLB-miss handler from user mode
  342. .globl __break_user_insn_tlb_miss
  343. __break_user_insn_tlb_miss:
  344. # we'll want to try the trap stub again
  345. sethi.p %hi(__trap_user_insn_tlb_miss),gr2
  346. setlo %lo(__trap_user_insn_tlb_miss),gr2
  347. movgs gr2,bpcsr
  348. __break_tlb_miss_common:
  349. LEDS 0x2101,gr2
  350. # cancel single-stepping mode
  351. movsg dcr,gr2
  352. sethi.p %hi(~DCR_SE),gr3
  353. setlo %lo(~DCR_SE),gr3
  354. and gr2,gr3,gr2
  355. movgs gr2,dcr
  356. # we'll swap the real return address for one with a BREAK insn so that we can re-enable
  357. # single stepping on return
  358. movsg pcsr,gr2
  359. sethi.p %hi(__break_tlb_miss_real_return_info),gr3
  360. setlo %lo(__break_tlb_miss_real_return_info),gr3
  361. sti gr2,@(gr3,#0)
  362. sethi.p %hi(__break_tlb_miss_return_break),gr2
  363. setlo %lo(__break_tlb_miss_return_break),gr2
  364. movgs gr2,pcsr
  365. # we also have to fudge PSR because the return BREAK is in kernel space and we want
  366. # to get a BREAK fault not an access violation should the return be to userspace
  367. movsg psr,gr2
  368. sti.p gr2,@(gr3,#4)
  369. ori gr2,#PSR_PS,gr2
  370. movgs gr2,psr
  371. LEDS 0x2102,gr2
  372. ldi @(gr31,#REG_CCR),gr3
  373. movgs gr3,ccr
  374. lddi @(gr31,#REG_GR(2)),gr2
  375. movsg scr3,gr31
  376. movgs gr0,brr
  377. rett #1
  378. # step through a DTLB-miss handler from user mode
  379. .globl __break_user_data_tlb_miss
  380. __break_user_data_tlb_miss:
  381. # we'll want to try the trap stub again
  382. sethi.p %hi(__trap_user_data_tlb_miss),gr2
  383. setlo %lo(__trap_user_data_tlb_miss),gr2
  384. movgs gr2,bpcsr
  385. bra __break_tlb_miss_common
  386. # step through an ITLB-miss handler from kernel mode
  387. .globl __break_kernel_insn_tlb_miss
  388. __break_kernel_insn_tlb_miss:
  389. # we'll want to try the trap stub again
  390. sethi.p %hi(__trap_kernel_insn_tlb_miss),gr2
  391. setlo %lo(__trap_kernel_insn_tlb_miss),gr2
  392. movgs gr2,bpcsr
  393. bra __break_tlb_miss_common
  394. # step through a DTLB-miss handler from kernel mode
  395. .globl __break_kernel_data_tlb_miss
  396. __break_kernel_data_tlb_miss:
  397. # we'll want to try the trap stub again
  398. sethi.p %hi(__trap_kernel_data_tlb_miss),gr2
  399. setlo %lo(__trap_kernel_data_tlb_miss),gr2
  400. movgs gr2,bpcsr
  401. bra __break_tlb_miss_common
  402. #endif
  403. ###############################################################################
  404. #
  405. # handle debug events originating with userspace
  406. #
  407. ###############################################################################
  408. __break_maybe_userspace:
  409. LEDS 0x3003,gr2
  410. setlos #BPSR_BS,gr2
  411. andcc gr3,gr2,gr0,icc0
  412. bne icc0,#0,__break_continue /* skip if PSR.S was 1 */
  413. movsg brr,gr2
  414. andicc gr2,#BRR_ST|BRR_SB,gr0,icc0
  415. beq icc0,#0,__break_continue /* jump if not BREAK or single-step */
  416. LEDS 0x3007,gr2
  417. # do the first part of the exception prologue here
  418. sethi.p %hi(__kernel_frame0_ptr),gr28
  419. setlo %lo(__kernel_frame0_ptr),gr28
  420. ldi @(gr28,#0),gr28
  421. andi gr28,#~7,gr28
  422. # set up the kernel stack pointer
  423. sti sp ,@(gr28,#REG_SP)
  424. ori gr28,0,sp
  425. sti gr0 ,@(gr28,#REG_GR(28))
  426. stdi gr20,@(gr28,#REG_GR(20))
  427. stdi gr22,@(gr28,#REG_GR(22))
  428. movsg tbr,gr20
  429. movsg bpcsr,gr21
  430. movsg psr,gr22
  431. # determine the exception type and cancel single-stepping mode
  432. or gr0,gr0,gr23
  433. movsg dcr,gr2
  434. sethi.p %hi(DCR_SE),gr3
  435. setlo %lo(DCR_SE),gr3
  436. andcc gr2,gr3,gr0,icc0
  437. beq icc0,#0,__break_no_user_sstep /* must have been a BREAK insn */
  438. not gr3,gr3
  439. and gr2,gr3,gr2
  440. movgs gr2,dcr
  441. ori gr23,#REG__STATUS_STEP,gr23
  442. __break_no_user_sstep:
  443. LEDS 0x300f,gr2
  444. movsg brr,gr2
  445. andi gr2,#BRR_ST|BRR_SB,gr2
  446. slli gr2,#1,gr2
  447. or gr23,gr2,gr23
  448. sti.p gr23,@(gr28,#REG__STATUS) /* record single step status */
  449. # adjust the value acquired from TBR - this indicates the exception
  450. setlos #~TBR_TT,gr2
  451. and.p gr20,gr2,gr20
  452. setlos #TBR_TT_BREAK,gr2
  453. or.p gr20,gr2,gr20
  454. # fudge PSR.PS and BPSR.BS to return to kernel mode through the trap
  455. # table as trap 126
  456. andi gr22,#~PSR_PS,gr22 /* PSR.PS should be 0 */
  457. movgs gr22,psr
  458. setlos #BPSR_BS,gr2 /* BPSR.BS should be 1 and BPSR.BET 0 */
  459. movgs gr2,bpsr
  460. # return through remainder of the exception prologue
  461. # - need to load gr23 with return handler address
  462. sethi.p %hi(__entry_return_from_user_exception),gr23
  463. setlo %lo(__entry_return_from_user_exception),gr23
  464. sethi.p %hi(__entry_common),gr3
  465. setlo %lo(__entry_common),gr3
  466. movgs gr3,bpcsr
  467. LEDS 0x301f,gr2
  468. ldi @(gr31,#REG_CCR),gr3
  469. movgs gr3,ccr
  470. lddi.p @(gr31,#REG_GR(2)),gr2
  471. xor gr31,gr31,gr31
  472. movgs gr0,brr
  473. #ifdef CONFIG_MMU
  474. movsg scr3,gr31
  475. #endif
  476. rett #1
  477. ###############################################################################
  478. #
  479. # resume normal debug-mode entry
  480. #
  481. ###############################################################################
  482. __break_continue:
  483. LEDS 0x4003,gr2
  484. # set up the kernel stack pointer
  485. sti sp,@(gr31,#REG_SP)
  486. sethi.p %hi(__break_stack_tos),sp
  487. setlo %lo(__break_stack_tos),sp
  488. # finish building the exception frame
  489. stdi gr4 ,@(gr31,#REG_GR(4))
  490. stdi gr6 ,@(gr31,#REG_GR(6))
  491. stdi gr8 ,@(gr31,#REG_GR(8))
  492. stdi gr10,@(gr31,#REG_GR(10))
  493. stdi gr12,@(gr31,#REG_GR(12))
  494. stdi gr14,@(gr31,#REG_GR(14))
  495. stdi gr16,@(gr31,#REG_GR(16))
  496. stdi gr18,@(gr31,#REG_GR(18))
  497. stdi gr20,@(gr31,#REG_GR(20))
  498. stdi gr22,@(gr31,#REG_GR(22))
  499. stdi gr24,@(gr31,#REG_GR(24))
  500. stdi gr26,@(gr31,#REG_GR(26))
  501. sti gr0 ,@(gr31,#REG_GR(28)) /* NULL frame pointer */
  502. sti gr29,@(gr31,#REG_GR(29))
  503. sti gr30,@(gr31,#REG_GR(30))
  504. sti gr8 ,@(gr31,#REG_ORIG_GR8)
  505. #ifdef CONFIG_MMU
  506. movsg scr3,gr19
  507. sti gr19,@(gr31,#REG_GR(31))
  508. #endif
  509. movsg bpsr ,gr19
  510. movsg tbr ,gr20
  511. movsg bpcsr,gr21
  512. movsg psr ,gr22
  513. movsg isr ,gr23
  514. movsg cccr ,gr25
  515. movsg lr ,gr26
  516. movsg lcr ,gr27
  517. andi.p gr22,#~(PSR_S|PSR_ET),gr5 /* rebuild PSR */
  518. andi gr19,#PSR_ET,gr4
  519. or.p gr4,gr5,gr5
  520. srli gr19,#10,gr4
  521. andi gr4,#PSR_S,gr4
  522. or.p gr4,gr5,gr5
  523. setlos #-1,gr6
  524. sti gr20,@(gr31,#REG_TBR)
  525. sti gr21,@(gr31,#REG_PC)
  526. sti gr5 ,@(gr31,#REG_PSR)
  527. sti gr23,@(gr31,#REG_ISR)
  528. sti gr25,@(gr31,#REG_CCCR)
  529. stdi gr26,@(gr31,#REG_LR)
  530. sti gr6 ,@(gr31,#REG_SYSCALLNO)
  531. # store CPU-specific regs
  532. movsg iacc0h,gr4
  533. movsg iacc0l,gr5
  534. stdi gr4,@(gr31,#REG_IACC0)
  535. movsg gner0,gr4
  536. movsg gner1,gr5
  537. stdi gr4,@(gr31,#REG_GNER0)
  538. # build the debug register frame
  539. movsg brr,gr4
  540. movgs gr0,brr
  541. movsg nmar,gr5
  542. movsg dcr,gr6
  543. stdi gr4 ,@(gr31,#REG_BRR)
  544. sti gr19,@(gr31,#REG_BPSR)
  545. sti.p gr6 ,@(gr31,#REG_DCR)
  546. # trap exceptions during break handling and disable h/w breakpoints/watchpoints
  547. sethi %hi(DCR_EBE),gr5
  548. setlo.p %lo(DCR_EBE),gr5
  549. sethi %hi(__entry_breaktrap_table),gr4
  550. setlo %lo(__entry_breaktrap_table),gr4
  551. movgs gr5,dcr
  552. movgs gr4,tbr
  553. # set up kernel global registers
  554. sethi.p %hi(__kernel_current_task),gr5
  555. setlo %lo(__kernel_current_task),gr5
  556. ld @(gr5,gr0),gr29
  557. ldi.p @(gr29,#4),gr15 ; __current_thread_info = current->thread_info
  558. sethi %hi(_gp),gr16
  559. setlo.p %lo(_gp),gr16
  560. # make sure we (the kernel) get div-zero and misalignment exceptions
  561. setlos #ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
  562. movgs gr5,isr
  563. # enter the GDB stub
  564. LEDS 0x4007,gr2
  565. or.p gr0,gr0,fp
  566. call debug_stub
  567. LEDS 0x403f,gr2
  568. # return from break
  569. lddi @(gr31,#REG_IACC0),gr4
  570. movgs gr4,iacc0h
  571. movgs gr5,iacc0l
  572. lddi @(gr31,#REG_GNER0),gr4
  573. movgs gr4,gner0
  574. movgs gr5,gner1
  575. lddi @(gr31,#REG_LR) ,gr26
  576. lddi @(gr31,#REG_CCR) ,gr24
  577. lddi @(gr31,#REG_PSR) ,gr22
  578. ldi @(gr31,#REG_PC) ,gr21
  579. ldi @(gr31,#REG_TBR) ,gr20
  580. ldi.p @(gr31,#REG_DCR) ,gr6
  581. andi gr22,#PSR_S,gr19 /* rebuild BPSR */
  582. andi.p gr22,#PSR_ET,gr5
  583. slli gr19,#10,gr19
  584. or gr5,gr19,gr19
  585. movgs gr6 ,dcr
  586. movgs gr19,bpsr
  587. movgs gr20,tbr
  588. movgs gr21,bpcsr
  589. movgs gr23,isr
  590. movgs gr24,ccr
  591. movgs gr25,cccr
  592. movgs gr26,lr
  593. movgs gr27,lcr
  594. LEDS 0x407f,gr2
  595. #ifdef CONFIG_MMU
  596. ldi @(gr31,#REG_GR(31)),gr2
  597. movgs gr2,scr3
  598. #endif
  599. ldi @(gr31,#REG_GR(30)),gr30
  600. ldi @(gr31,#REG_GR(29)),gr29
  601. lddi @(gr31,#REG_GR(26)),gr26
  602. lddi @(gr31,#REG_GR(24)),gr24
  603. lddi @(gr31,#REG_GR(22)),gr22
  604. lddi @(gr31,#REG_GR(20)),gr20
  605. lddi @(gr31,#REG_GR(18)),gr18
  606. lddi @(gr31,#REG_GR(16)),gr16
  607. lddi @(gr31,#REG_GR(14)),gr14
  608. lddi @(gr31,#REG_GR(12)),gr12
  609. lddi @(gr31,#REG_GR(10)),gr10
  610. lddi @(gr31,#REG_GR(8)) ,gr8
  611. lddi @(gr31,#REG_GR(6)) ,gr6
  612. lddi @(gr31,#REG_GR(4)) ,gr4
  613. lddi @(gr31,#REG_GR(2)) ,gr2
  614. ldi.p @(gr31,#REG_SP) ,sp
  615. xor gr31,gr31,gr31
  616. movgs gr0,brr
  617. #ifdef CONFIG_MMU
  618. movsg scr3,gr31
  619. #endif
  620. rett #1
  621. ###################################################################################################
  622. #
  623. # GDB stub "system calls"
  624. #
  625. ###################################################################################################
  626. #ifdef CONFIG_GDBSTUB
  627. # void gdbstub_console_write(struct console *con, const char *p, unsigned n)
  628. .globl gdbstub_console_write
  629. gdbstub_console_write:
  630. break
  631. bralr
  632. #endif
  633. # GDB stub BUG() trap
  634. # GR8 is the proposed signal number
  635. .globl __debug_bug_trap
  636. __debug_bug_trap:
  637. break
  638. bralr
  639. # transfer kernel exeception to GDB for handling
  640. .globl __break_hijack_kernel_event
  641. __break_hijack_kernel_event:
  642. break
  643. .globl __break_hijack_kernel_event_breaks_here
  644. __break_hijack_kernel_event_breaks_here:
  645. nop
  646. #ifdef CONFIG_MMU
  647. # handle a return from TLB-miss that requires single-step reactivation
  648. .globl __break_tlb_miss_return_break
  649. __break_tlb_miss_return_break:
  650. break
  651. __break_tlb_miss_return_breaks_here:
  652. nop
  653. #endif
  654. # guard the first .text label in the next file from confusion
  655. nop