! arch/sh/kernel/entry.S
! (web-viewer artifacts — file-size banner and line-number gutters — removed)
/*
 *  linux/arch/sh/entry.S
 *
 *  Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 *  Copyright (C) 2003 - 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpu/mmu_context.h>
#include <asm/unistd.h>

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
! to be jumped is too far, but it causes illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h
 *
 *	r0
 *	...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */

#if defined(CONFIG_KGDB_NMI)
NMI_VEC = 0x1c0			! Must catch early for debounce
#endif

/* Byte offsets of saved registers within the stack frame laid out above. */
OFF_R0	=  0		/* Return value. New ABI also arg4 */
OFF_R1	=  4		/* New ABI: arg5 */
OFF_R2	=  8		/* New ABI: arg6 */
OFF_R3	=  12		/* New ABI: syscall_nr */
OFF_R4	=  16		/* New ABI: arg0 */
OFF_R5	=  20		/* New ABI: arg1 */
OFF_R6	=  24		/* New ABI: arg2 */
OFF_R7	=  28		/* New ABI: arg3 */
OFF_SP	=  (15*4)
OFF_PC	=  (16*4)
OFF_SR	=  (16*4+8)
OFF_TRA	=  (16*4+6*4)

/*
 * "k" register aliases: while SR.RB=1 (exception entry) these names refer
 * to the bank-1 copies of r0-r4, usable as scratch without touching the
 * interrupted context's registers.
 */
#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
!			_and_
! TLB hits, but the access violate the protection.
! It can be valid access, such as stack grow and/or C-O-W.
!
!
! Find the pmd/pte entry and loadtlb
! If it's not found, cause address error (SEGV)
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends *much* on C implementation.
!

/* Disable interrupts: set all four SR.IMASK bits (0xf0). Clobbers r0. */
#define CLI()				\
	stc	sr, r0;			\
	or	#0xf0, r0;		\
	ldc	r0, sr

/* Re-enable interrupts: clear IMASK, then OR in the saved global
 * interrupt mask from k_g_imask (r6_bank). Clobbers r10 and r11. */
#define STI()				\
	mov.l	__INV_IMASK, r11;	\
	stc	sr, r10;		\
	and	r11, r10;		\
	stc	k_g_imask, r11;		\
	or	r11, r10;		\
	ldc	r10, sr

#if defined(CONFIG_PREEMPT)
#  define preempt_stop()	CLI()
#else
#  define preempt_stop()
#  define resume_kernel		restore_all
#endif
#if defined(CONFIG_MMU)
	!
	! MMU fault entry stubs. Each loads r5 with the writeaccess flag
	! (0 = read, 1 = write) in the branch delay slot and falls into
	! the shared call_dpf dispatcher below.
	!
	.align	2
ENTRY(tlb_miss_load)
	bra	call_dpf
	 mov	#0, r5
	.align	2
ENTRY(tlb_miss_store)
	bra	call_dpf
	 mov	#1, r5
	.align	2
ENTRY(initial_page_write)
	bra	call_dpf
	 mov	#1, r5
	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_dpf
	 mov	#0, r5
	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_dpf
	 mov	#1, r5

	!
	! Common data-page-fault path: first try the fast handler
	! __do_page_fault(regs=r4, writeaccess=r5, address=r6). If it
	! returns zero, the fault is resolved and we return via pr.
	! Otherwise re-enable interrupts and take the slow path through
	! do_page_fault(). r8/r9 stash writeaccess/address across the call.
	!
call_dpf:
	mov.l	1f, r0
	mov	r5, r8			! save writeaccess flag
	mov.l	@r0, r6			! faulting address from MMU_TEA
	mov	r6, r9			! save faulting address
	mov.l	2f, r0
	sts	pr, r10			! save return address across the call
	jsr	@r0			! __do_page_fault(regs, write, addr)
	 mov	r15, r4
	!
	tst	r0, r0			! fast path succeeded (returned 0)?
	bf/s	0f
	 lds	r10, pr
	rts				! yes: straight back to the caller
	 nop
0:	STI()				! no: slow path with interrupts on
	mov.l	3f, r0
	mov	r9, r6			! address
	mov	r8, r5			! writeaccess
	jmp	@r0			! tail-call do_page_fault(regs, write, addr)
	 mov	r15, r4

	.align 2
1:	.long	MMU_TEA
2:	.long	__do_page_fault
3:	.long	do_page_fault

	!
	! Address-error entry stubs; r5 = writeaccess, shared tail below.
	!
	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0
	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

	!
	! Tail-call do_address_error(regs=r4, writeaccess=r5, address=r6).
	!
	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align 2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */
#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
	! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present.
	! If both are configured, handle the debug traps (breakpoints) in SW,
	! but still allow BIOS traps to FW.
	.align	2
debug_kernel:
#if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB)
	/* Force BIOS call to FW (debug_trap put TRA in r8) */
	mov	r8,r0
	shlr2	r0			! TRA>>2 = trapa immediate
	cmp/eq	#0x3f,r0		! trapa #0x3f is the BIOS call
	bt	debug_kernel_fw
#endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */

debug_enter:
#if defined(CONFIG_SH_KGDB)
	/* Jump to kgdb, pass stacked regs as arg */
debug_kernel_sw:
	mov.l	3f, r0
	jmp	@r0
	 mov	r15, r4
	.align	2
3:	.long	kgdb_handle_exception
#endif /* CONFIG_SH_KGDB */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/*
	 * Unwind the stack and jmp to the debug entry.
	 * Pops the full register frame (layout as saved by
	 * handle_exception), flips to register bank 1 part-way so the
	 * bank-0 copies of r8+ are restored, then re-enters through the
	 * firmware's VBR with SSR set up for the interrupted context.
	 */
debug_kernel_fw:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	stc	sr, r8
	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
	or	r9, r8
	ldc	r8, sr			! here, change the register bank
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k0		! original stack pointer
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k1		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	mov	k0, r15			! restore the interrupted stack
	!
	mov.l	2f, k0
	mov.l	@k0, k0			! firmware debug entry (gdb_vbr_vector)
	jmp	@k0
	 ldc	k1, ssr		! delay slot: SSR = original SR for rte in FW
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

#endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */
!
	! debug_trap: entered from system_call for trapa numbers >= 0x20.
	! If the trap came from kernel space and a debugger is configured,
	! divert to debug_kernel; otherwise deliver a software breakpoint
	! trap to userspace via break_point_trap_software.
	!
	.align	2
debug_trap:
#if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB)
	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0	! get status register
	shll	r0
	shll	r0		! kernel space?  (MD bit -> T)
	bt/s	debug_kernel
#endif
	mov.l	@r15, r0	! Restore R0 value
	mov.l	1f, r8
	jmp	@r8
	 nop

	!
	! Generic "unexpected exception" entry: enable interrupts and
	! hand off to do_exception_error().
	!
	.align	2
ENTRY(exception_error)
	!
	STI()
	mov.l	2f, r0
	jmp	@r0
	 nop

!
	.align	2
1:	.long	break_point_trap_software
2:	.long	do_exception_error
!
	! Common exception/IRQ return path. Exceptions fall through
	! preempt_stop() (CLI under CONFIG_PREEMPT, no-op otherwise);
	! IRQs enter at ret_from_irq. A return to kernel space goes to
	! resume_kernel (aliased to restore_all when !CONFIG_PREEMPT);
	! a return to user space goes to resume_userspace.
	!
	.align	2
ret_from_exception:
	preempt_stop()
ENTRY(ret_from_irq)
	!
	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0	! get status register
	shll	r0
	shll	r0		! kernel space?  (saved SR.MD -> T)
	bt/s	resume_kernel	! Yes, it's from kernel, go back soon
	 GET_THREAD_INFO(r8)	! delay slot: r8 = current_thread_info

#ifdef CONFIG_PREEMPT
	bra	resume_userspace
	 nop
ENTRY(resume_kernel)
	! Kernel preemption: reschedule only if preempt_count == 0,
	! TIF_NEED_RESCHED is set, and interrupts were not masked in the
	! interrupted context.
	mov.l	@(TI_PRE_COUNT,r8), r0	! current_thread_info->preempt_count
	tst	r0, r0
	bf	noresched
need_resched:
	mov.l	@(TI_FLAGS,r8), r0	! current_thread_info->flags
	tst	#_TIF_NEED_RESCHED, r0	! need_resched set?
	bt	noresched

	mov	#OFF_SR, r0
	mov.l	@(r0,r15), r0		! get status register
	and	#0xf0, r0		! interrupts off (exception path)?
	cmp/eq	#0xf0, r0
	bt	noresched

	mov.l	1f, r0
	mov.l	r0, @(TI_PRE_COUNT,r8)	! preempt_count = PREEMPT_ACTIVE

	STI()
	mov.l	2f, r0
	jsr	@r0			! call schedule()
	 nop
	mov	#0, r0
	mov.l	r0, @(TI_PRE_COUNT,r8)	! preempt_count = 0
	CLI()

	bra	need_resched		! re-check before returning
	 nop
noresched:
	bra	restore_all
	 nop

	.align	2
1:	.long	PREEMPT_ACTIVE
2:	.long	schedule
#endif
!
	! Return-to-user path: with interrupts disabled, check the thread
	! flags; if any work is pending (reschedule, signals, ...), loop
	! through work_pending until the flags are clear, then restore_all.
	!
ENTRY(resume_userspace)
	! r8: current_thread_info
	CLI()
	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
	tst	#_TIF_WORK_MASK, r0
	bt/s	restore_all			! nothing pending: leave
	 tst	#_TIF_NEED_RESCHED, r0		! delay slot: prime T for work_pending

	.align	2
work_pending:
	! r0: current_thread_info->flags
	! r8: current_thread_info
	! t:  result of "tst	#_TIF_NEED_RESCHED, r0"
	bf/s	work_resched
	 tst	#(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0
work_notifysig:
	bt/s	restore_all			! no signal work either
	 mov	r15, r4				! arg0: regs
	mov	r12, r5		! set arg1(save_r0) — r12 holds the pre-syscall r0
	mov	r0, r6				! arg2: thread flags
	mov.l	2f, r1
	mova	restore_all, r0			! have do_notify_resume
	jmp	@r1				! "return" into restore_all
	 lds	r0, pr
work_resched:
#ifndef CONFIG_PREEMPT
	! gUSA handling: if the user SP is >= 0xc0000000 it marks a
	! gUSA (userspace atomicity) region; rewind the user PC to the
	! region's restart point before we schedule away.
	mov.l	@(OFF_SP,r15), r0	! get user space stack pointer
	mov	r0, r1
	shll	r0			! SP bit31 -> T
	bf/s	1f
	 shll	r0			! SP bit30 -> T
	bf/s	1f
	 mov	#OFF_PC, r0
	! 				  SP >= 0xc0000000 : gUSA mark
	mov.l	@(r0,r15), r2		! get user space PC (program counter)
	mov.l	@(OFF_R0,r15), r3	! end point
	cmp/hs	r3, r2			! r2 >= r3?
	bt	1f			! already past the region
	add	r3, r1			! rewind point #2
	mov.l	r1, @(r0,r15)		! reset PC to rewind point #2
	!
1:
#endif
	mov.l	1f, r1
	jsr	@r1				! schedule
	 nop
	CLI()
	!
	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
	tst	#_TIF_WORK_MASK, r0
	bt	restore_all
	bra	work_pending
	 tst	#_TIF_NEED_RESCHED, r0

	.align	2
1:	.long	schedule
2:	.long	do_notify_resume
!
	! Syscall-exit slow path: if the only pending work is not a
	! syscall trace, share the generic work_pending path; otherwise
	! notify the tracer (do_syscall_trace) before resuming userspace.
	!
	.align	2
syscall_exit_work:
	! r0: current_thread_info->flags
	! r8: current_thread_info
	tst	#_TIF_SYSCALL_TRACE, r0
	bt/s	work_pending
	 tst	#_TIF_NEED_RESCHED, r0
	STI()
	! XXX setup arguments...
	mov.l	4f, r0			! do_syscall_trace
	jsr	@r0
	 nop
	bra	resume_userspace
	 nop

	.align	2
syscall_trace_entry:
	!                     	Yes it is traced.
	! XXX setup arguments...
	mov.l	4f, r11		! Call do_syscall_trace which notifies
	jsr	@r11	    	! superior (will chomp R[0-7])
	 nop
	! Reload R0-R4 from kernel stack, where the
	! parent may have modified them using
	! ptrace(POKEUSR).  (Note that R0-R2 are
	! used by the system call handler directly
	! from the kernel stack anyway, so don't need
	! to be reloaded here.)  This allows the parent
	! to rewrite system calls and args on the fly.
	mov.l	@(OFF_R4,r15), r4   ! arg0
	mov.l	@(OFF_R5,r15), r5
	mov.l	@(OFF_R6,r15), r6
	mov.l	@(OFF_R7,r15), r7   ! arg3
	mov.l	@(OFF_R3,r15), r3   ! syscall_nr
	! 			Arrange for do_syscall_trace to be called
	! 			again as the system call returns.
	mov.l	2f, r10			! Number of syscalls
	cmp/hs	r10, r3			! rewritten nr still out of range?
	bf	syscall_call
	mov	#-ENOSYS, r0
	bra	syscall_exit
	 mov.l	r0, @(OFF_R0,r15)	! Return value
/*
 * Syscall interface:
 *
 *	Syscall #: R3
 *	Arguments #0 to #3: R4--R7
 *	Arguments #4 to #6: R0, R1, R2
 *	TRA: (number of arguments + 0x10) x 4
 *
 * This code also handles delegating other traps to the BIOS/gdb stub
 * according to:
 *
 * Trap number
 * (TRA>>2)	Purpose
 * --------	-------
 * 0x0-0xf	old syscall ABI
 * 0x10-0x1f	new syscall ABI
 * 0x20-0xff	delegated through debug_trap to BIOS/gdb stub.
 *
 * Note: When we're first called, the TRA value must be shifted
 * right 2 bits in order to get the value that was used as the "trapa"
 * argument.
 */

	!
	! First return of a newly forked task: run schedule_tail(prev)
	! (prev task in r0 from the context switch), then take the normal
	! syscall exit path.
	!
	.align	2
	.globl	ret_from_fork
ret_from_fork:
	mov.l	1f, r8
	jsr	@r8		! schedule_tail(prev)
	 mov	r0, r4		! delay slot: arg0 = prev
	bra	syscall_exit
	 nop
	.align	2
1:	.long	schedule_tail
!
	!
	! System-call trap entry. TRA (trapa immediate << 2) selects:
	! values >= 0x80 (trapa >= 0x20) are delegated to debug_trap;
	! otherwise the syscall number in r3 is validated and dispatched
	! through sys_call_table, with optional ptrace tracing.
	!
ENTRY(system_call)
	mov.l	1f, r9
	mov.l	@r9, r8		! Read from TRA (Trap Address) Register
	!
	! Is the trap argument >= 0x20? (TRA will be >= 0x80)
	mov	#0x7f, r9
	cmp/hi	r9, r8
	bt/s	0f		! yes: not a syscall, delegate
	 mov	#OFF_TRA, r9
	add	r15, r9
	!
	mov.l	r8, @r9		! set TRA value to tra
	STI()
	! Call the system call handler through the table.
	! First check for bad syscall number
	mov	r3, r9
	mov.l	2f, r8		! Number of syscalls
	cmp/hs	r8, r9
	bf/s	good_system_call
	 GET_THREAD_INFO(r8)	! delay slot: r8 = current_thread_info
syscall_badsys:			! Bad syscall number
	mov	#-ENOSYS, r0
	bra	resume_userspace
	 mov.l	r0, @(OFF_R0,r15)	! Return value
	!
0:
	bra	debug_trap
	 nop
	!
good_system_call:		! Good syscall number
	mov.l	@(TI_FLAGS,r8), r8
	mov	#_TIF_SYSCALL_TRACE, r10
	tst	r10, r8
	bf	syscall_trace_entry	! traced: notify the tracer first
	!
syscall_call:
	shll2	r9		! x4 (table of 4-byte pointers)
	mov.l	3f, r8		! Load the address of sys_call_table
	add	r8, r9
	mov.l	@r9, r8
	jsr	@r8	    	! jump to specific syscall handler
	 nop
	mov.l	@(OFF_R0,r15), r12		! save r0 (pre-syscall value)
	mov.l	r0, @(OFF_R0,r15)		! save the return value
	!
syscall_exit:
	CLI()
	!
	GET_THREAD_INFO(r8)
	mov.l	@(TI_FLAGS,r8), r0		! current_thread_info->flags
	tst	#_TIF_ALLWORK_MASK, r0
	bf	syscall_exit_work		! pending work: slow path
!
	! restore_all: pop the complete register frame and rte back to
	! the interrupted context. The register bank is switched to
	! bank 1 (RB=1, BL=1) part-way through so that r8-r14 land in
	! the bank-0 copies while k0-k4 remain usable as scratch.
	!
restore_all:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r8
	mov.l	7f, r9
	or	r9, r8			! BL =1, RB=1
	ldc	r8, sr			! here, change the register bank
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4		! original stack pointer
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k3		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	add	#4, r15			! Skip syscall number
	!
#ifdef CONFIG_SH_DSP
	mov.l	@r15+, k0		! DSP mode marker
	mov.l	5f, k1
	cmp/eq	k0, k1			! Do we have a DSP stack frame?
	bf	skip_restore

	stc	sr, k0			! Enable CPU DSP mode
	or	k1, k0			! (within kernel it may be disabled)
	ldc	k0, sr
	mov	r2, k0			! Backup r2

	! Restore DSP registers from stack
	mov	r15, r2
	movs.l	@r2+, a1
	movs.l	@r2+, a0g
	movs.l	@r2+, a1g
	movs.l	@r2+, m0
	movs.l	@r2+, m1
	mov	r2, r15

	lds.l	@r15+, a0
	lds.l	@r15+, x0
	lds.l	@r15+, x1
	lds.l	@r15+, y0
	lds.l	@r15+, y1
	lds.l	@r15+, dsr
	ldc.l	@r15+, rs
	ldc.l	@r15+, re
	ldc.l	@r15+, mod

	mov	k0, r2			! Restore r2
skip_restore:
#endif
	!
	! Calculate new SR value
	mov	k3, k2			! original SR value
	mov.l	9f, k1
	and	k1, k2			! Mask orignal SR value
	!
	mov	k3, k0			! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0		! was IMASK fully masked (0xf)?
	bt/s	6f
	 shll2	k0			! delay slot: restore IMASK position
	mov	g_imask, k0		! else use the global interrupt mask
	!
6:	or	k0, k2			! Set the IMASK-bits
	ldc	k2, ssr			! SSR = SR to restore at rte
	!
#if defined(CONFIG_KGDB_NMI)
	! Clear in_nmi
	mov.l	6f, k0
	mov	#0, k1
	mov.b	k1, @k0
#endif
	mov.l	@r15+, k2		! restore EXPEVT
	mov	k4, r15			! back to the original stack
	rte
	 nop

	! Literal pool shared by the syscall/exit paths above.
	.align	2
1:	.long	TRA
2:	.long	NR_syscalls
3:	.long	sys_call_table
4:	.long	do_syscall_trace
5:	.long	0x00001000	! DSP
7:	.long	0x30000000
9:
__INV_IMASK:
	.long	0xffffff0f	! ~(IMASK)
! Exception Vector Base
!
!	Should be aligned page boundary.
!
	.balign 	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! VBR+0x100: general exception. k2 = EXPEVT code, k3 = return address
! for handle_exception (ret_from_exception).
!
	.balign 	256,0,256
general_exception:
	mov.l	1f, k2
	mov.l	2f, k3
	bra	handle_exception
	 mov.l	@k2, k2		! delay slot: k2 = *EXPEVT
	.align	2
1:	.long	EXPEVT
2:	.long	ret_from_exception
!
! VBR+0x400: TLB miss. Same protocol as general_exception.
!
	.balign 	1024,0,1024
tlb_miss:
	mov.l	1f, k2
	mov.l	4f, k3
	bra	handle_exception
	 mov.l	@k2, k2		! delay slot: k2 = *EXPEVT
!
! VBR+0x600: interrupt. k2 = -1 marks the interrupt path for
! handle_exception (INTEVT is read later, in do_IRQ).
!
	.balign 	512,0,512
interrupt:
	mov.l	2f, k2
	mov.l	3f, k3
#if defined(CONFIG_KGDB_NMI)
	! Debounce (filter nested NMI)
	mov.l	@k2, k0
	mov.l	5f, k1
	cmp/eq	k1, k0			! is this the NMI vector?
	bf	0f
	mov.l	6f, k1
	tas.b	@k1			! atomically set in_nmi
	bt	0f
	rte				! already in an NMI: drop it
	 nop
	.align	2
5:	.long	NMI_VEC
6:	.long	in_nmi
0:
#endif /* defined(CONFIG_KGDB_NMI) */
	bra	handle_exception
	 mov	#-1, k2		! interrupt exception marker
	.align	2
1:	.long	EXPEVT
2:	.long	INTEVT
3:	.long	ret_from_irq
4:	.long	ret_from_exception
!
!
	!
	! Common exception entry, reached from the vector stubs with:
	!   k2 = exception code (EXPEVT) or -1 for interrupts
	!   k3 = address the C handler should return to
	! Switches to the kernel stack if we came from user mode, builds
	! the full register frame described at the top of this file, drops
	! back to register bank 0, and dispatches through
	! exception_handling_table (or to do_IRQ for interrupts).
	!
	.align	2
ENTRY(handle_exception)
	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
	! save all registers onto stack.
	!
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0
	/* User space to kernel */
	mov	#(THREAD_SIZE >> 8), k1
	shll8	k1		! k1 := THREAD_SIZE
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:	mov.l	2f, k1		! k1 = SR bits to set below (FD=1, IMASK=15)
	!
#ifdef CONFIG_SH_DSP
	mov.l	r2, @-r15	! Save r2, we need another reg
	stc	sr, k4
	mov.l	1f, r2
	tst	r2, k4		! Check if in DSP mode
	mov.l	@r15+, r2	! Restore r2 now
	bt/s	skip_save
	 mov	#0, k4		! Set marker for no stack frame
	mov	r2, k4		! Backup r2 (in k4) for later

	! Save DSP registers on stack
	stc.l	mod, @-r15
	stc.l	re, @-r15
	stc.l	rs, @-r15
	sts.l	dsr, @-r15
	sts.l	y1, @-r15
	sts.l	y0, @-r15
	sts.l	x1, @-r15
	sts.l	x0, @-r15
	sts.l	a0, @-r15

	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
	! FIXME: Make sure that this is still the case with newer toolchains,
	! as we're not at all interested in supporting ancient toolchains at
	! this point. -- PFM.
	mov	r15, r2
	.word	0xf653		! movs.l	a1, @-r2
	.word	0xf6f3		! movs.l	a0g, @-r2
	.word	0xf6d3		! movs.l	a1g, @-r2
	.word	0xf6c3		! movs.l	m0, @-r2
	.word	0xf6e3		! movs.l	m1, @-r2
	mov	r2, r15

	mov	k4, r2		! Restore r2
	mov.l	1f, k4		! Force DSP stack frame
skip_save:
	mov.l	k4, @-r15	! Push DSP mode marker onto stack
#endif
	! Save the user registers on the stack.
	mov.l	k2, @-r15	! EXPEVT
	mov	#-1, k4
	mov.l	k4, @-r15	! set TRA (default: -1)
	!
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	sts.l	pr, @-r15
	stc.l	spc, @-r15
	!
	lds	k3, pr		! Set the return address to pr
	!
	mov.l	k0, @-r15	! save orignal stack
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15
	!
	stc	sr, r8		! Back to normal register bank, and
	or	k1, r8		! Block all interrupts
	mov.l	3f, k1
	and	k1, r8		! ...
	ldc	r8, sr		! ...changed here.
	!
	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	mov.l	r0, @-r15

	/*
	 * This gets a bit tricky.. in the INTEVT case we don't want to use
	 * the VBR offset as a destination in the jump call table, since all
	 * of the destinations are the same. In this case, (interrupt) sets
	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
	 * to determine the exception type. For all other exceptions, we
	 * forcibly read EXPEVT from memory and fix up the jump address, in
	 * the interrupt exception case we jump to do_IRQ() and defer the
	 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
	 * checks that do_IRQ() was doing..
	 */
	stc	r2_bank, r8	! exception code, or -1 for interrupts
	cmp/pz	r8
	bf	interrupt_exception
	shlr2	r8
	shlr	r8		! EXPEVT>>5 = table index * 4
	mov.l	4f, r9
	add	r8, r9
	mov.l	@r9, r9
	jmp	@r9		! handler returns to pr (= k3)
	 nop
	rts
	 nop

	.align	2
1:	.long	0x00001000	! DSP=1
2:	.long	0x000080f0	! FD=1, IMASK=15
3:	.long	0xcfffffff	! RB=0, BL=0
4:	.long	exception_handling_table

interrupt_exception:
	mov.l	1f, r9
	jmp	@r9		! do_IRQ, returns to pr (= ret_from_irq)
	 nop
	rts
	 nop
	.align 2
1:	.long	do_IRQ

	! Placeholder vector: simply return.
	.align	2
ENTRY(exception_none)
	rts
	 nop
  775. .align 2
  776. ENTRY(exception_none)
  777. rts
  778. nop