entry.S (arch/sh/kernel) — 14 KB
/*
 * arch/sh/kernel/entry.S
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2003 - 2006  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
  11. #include <linux/sys.h>
  12. #include <linux/errno.h>
  13. #include <linux/linkage.h>
  14. #include <asm/asm-offsets.h>
  15. #include <asm/thread_info.h>
  16. #include <asm/unistd.h>
  17. #include <asm/cpu/mmu_context.h>
  18. #include <asm/pgtable.h>
  19. #include <asm/page.h>
! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
! to be jumped is too far, but it causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h.
 *
 *	r0
 *	...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 */
#if defined(CONFIG_KGDB_NMI)
NMI_VEC = 0x1c0			! Must catch early for debounce
#endif

/*
 * Offsets into the register frame pushed by handle_exception (the
 * pt_regs layout listed in the header comment: r0..r15, then
 * spc/pr/ssr/gbr/mach/macl and the syscall number).
 */
OFF_R0	= 0		/* Return value. New ABI also arg4 */
OFF_R1	= 4		/* New ABI: arg5 */
OFF_R2	= 8		/* New ABI: arg6 */
OFF_R3	= 12		/* New ABI: syscall_nr */
OFF_R4	= 16		/* New ABI: arg0 */
OFF_R5	= 20		/* New ABI: arg1 */
OFF_R6	= 24		/* New ABI: arg2 */
OFF_R7	= 28		/* New ABI: arg3 */
OFF_SP	= (15*4)	/* saved r15 (original stack pointer) */
OFF_PC	= (16*4)	/* saved spc (return PC) */
OFF_SR	= (16*4+8)	/* saved ssr (after spc and pr) */
OFF_TRA	= (16*4+6*4)	/* saved syscall/trap number */

/*
 * Short aliases for the bank-1 scratch registers used while SR.RB=1,
 * i.e. inside the exception entry/exit paths (see the register-usage
 * note below).
 */
#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1: global interrupt mask */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1: current thread_info pointer */

#include <asm/entry-macros.S>
/*
 * Kernel mode register usage (register bank 1):
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
!			_and_
! TLB hits, but the access violates the protection.
! It can be valid access, such as stack grow and/or C-O-W.
!
!
! Find the pmd/pte entry and loadtlb
! If it's not found, cause address error (SEGV)
!
! Although this could be written in assembly language (and it'd be faster),
! this first version depends *much* on C implementation.
!
#if defined(CONFIG_MMU)
	.align	2

/*
 * Slow-path MMU fault entry points.  Each stub sets the writeaccess
 * flag in r5 (in the branch delay slot) and branches to the shared
 * dispatcher call_dpf, which tail-calls the C page-fault handler with
 * r4 = regs, r5 = writeaccess, r6 = faulting address.  The return
 * address was preset in pr by handle_exception.
 */
ENTRY(tlb_miss_load)
	bra	call_dpf
	 mov	#0, r5		! writeaccess = 0 (delay slot)

	.align	2
ENTRY(tlb_miss_store)
	bra	call_dpf
	 mov	#1, r5		! writeaccess = 1 (delay slot)

	.align	2
ENTRY(initial_page_write)
	bra	call_dpf
	 mov	#1, r5		! first write to a clean page: write access

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_dpf
	 mov	#0, r5		! writeaccess = 0 (delay slot)

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_dpf
	 mov	#1, r5		! writeaccess = 1 (delay slot)

call_dpf:
	mov.l	1f, r0
	mov.l	@r0, r6		! address: faulting address from MMU_TEA
	mov.l	3f, r0
	jmp	@r0		! tail-jump to do_page_fault (returns via pr)
	 mov	r15, r4		! regs: saved register frame (delay slot)

	.align	2
1:	.long	MMU_TEA
3:	.long	do_page_fault

	.align	2
/*
 * CPU address-error entry points: same pattern as above, but the
 * common tail dispatches to do_address_error().
 */
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address (from MMU_TEA)
	mov.l	2f, r0
	jmp	@r0		! tail-jump to do_address_error (returns via pr)
	 mov	r15, r4		! regs (delay slot)

	.align	2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */
#if defined(CONFIG_SH_STANDARD_BIOS)
/*
 * Unwind the saved register frame and jump to the debugger entry
 * point installed at gdb_vbr_vector (sh-bios / gdb stub).  This
 * mirrors restore_all below, but exits through the debug vector
 * instead of rte; the SR to restore is handed over in ssr.
 */
debug_kernel_fw:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	stc	sr, r8
	mov.l	1f, r9			! BL =1, RB=1, IMASK=0x0F
	or	r9, r8
	ldc	r8, sr			! here, change the register bank
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k0		! original r15 (stack pointer)
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k1		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	mov	k0, r15			! back onto the original stack
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0			! enter the debugger stub
	 ldc	k1, ssr			! delay slot: SR it will restore on rte
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */
/*
 * Common exception/syscall exit path: pop the complete register frame
 * pushed by handle_exception, recombine the saved SR's IMASK field
 * with the global interrupt mask, switch back to the original stack,
 * and return to the interrupted context with rte.
 */
restore_all:
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r8
	mov.l	7f, r9
	or	r9, r8			! BL =1, RB=1
	ldc	r8, sr			! here, change the register bank
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4		! original stack pointer
	ldc.l	@r15+, spc
	lds.l	@r15+, pr
	mov.l	@r15+, k3		! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	add	#4, r15			! Skip syscall number
	!
#ifdef CONFIG_SH_DSP
	mov.l	@r15+, k0		! DSP mode marker
	mov.l	5f, k1			! 5f: DSP bit (0x00001000)
	cmp/eq	k0, k1			! Do we have a DSP stack frame?
	bf	skip_restore

	stc	sr, k0			! Enable CPU DSP mode
	or	k1, k0			! (within kernel it may be disabled)
	ldc	k0, sr
	mov	r2, k0			! Backup r2

	! Restore DSP registers from stack
	mov	r15, r2
	movs.l	@r2+, a1
	movs.l	@r2+, a0g
	movs.l	@r2+, a1g
	movs.l	@r2+, m0
	movs.l	@r2+, m1
	mov	r2, r15

	lds.l	@r15+, a0
	lds.l	@r15+, x0
	lds.l	@r15+, x1
	lds.l	@r15+, y0
	lds.l	@r15+, y1
	lds.l	@r15+, dsr
	ldc.l	@r15+, rs
	ldc.l	@r15+, re
	ldc.l	@r15+, mod

	mov	k0, r2			! Restore r2
skip_restore:
#endif
	!
	! Calculate new SR value
	mov	k3, k2			! original SR value
	mov	#0xf0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2			! Mask original SR value (clear IMASK)
	!
	mov	k3, k0			! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0		! saved IMASK == 15?
	bt/s	6f			! yes: keep it as-is
	 shll2	k0			! delay slot: scale back to IMASK position
	mov	g_imask, k0		! else use the global interrupt mask
	!
6:	or	k0, k2			! Set the IMASK-bits
	ldc	k2, ssr
	!
#if defined(CONFIG_KGDB_NMI)
	! Clear in_nmi (6f: the in_nmi literal in the interrupt pool below)
	mov.l	6f, k0
	mov	#0, k1
	mov.b	k1, @k0
#endif
	mov.l	@r15+, k2		! restore EXPEVT
	mov	k4, r15			! back onto the original stack
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
7:	.long	0x30000000
! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned page boundary.
!
	.balign 	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! General-exception entry (VBR + 0x100): pass the EXPEVT exception
! code in k2 and the common return address (ret_from_exception) in k3
! to handle_exception.
!
	.balign 	256,0,256
general_exception:
	mov.l	1f, k2
	mov.l	2f, k3
	bra	handle_exception
	 mov.l	@k2, k2		! delay slot: k2 = *EXPEVT
	.align	2
1:	.long	EXPEVT
2:	.long	ret_from_exception
!
!
/* This code makes some assumptions to improve performance.
 * Make sure they are still true. */
#if PTRS_PER_PGD != PTRS_PER_PTE
#error PGD and PTE sizes don't match
#endif

/* gas doesn't flag impossible values for mov #immediate as an error */
#if (_PAGE_PRESENT >> 2) > 0x7f
#error cannot load PAGE_PRESENT as an immediate
#endif
#if _PAGE_DIRTY > 0x7f
#error cannot load PAGE_DIRTY as an immediate
#endif
#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
#endif

/*
 * Load the MMU_PTEH register address into a register.  On SH-4 the
 * value is taken from the literal pool (label 8f in tlb_miss below),
 * presumably because it is out of range for the 8-bit mov #imm form;
 * other CPUs use the immediate directly.
 */
#if defined(CONFIG_CPU_SH4)
#define ldmmupteh(r)	mov.l	8f, r
#else
#define ldmmupteh(r)	mov	#MMU_PTEH, r
#endif
/*
 * Fast TLB-miss refill handler (entered with bank-1 registers active).
 * Walks the two-level page table by hand: pgd entry -> pte, checks
 * _PAGE_PRESENT (and, with CONFIG_CPU_HAS_PTEA, programs the PTEA
 * register), writes PTEL and executes ldtlb.  A pte missing the
 * accessed bit detours via 10: to set _PAGE_ACCESSED and write the
 * pte back.  Absent pgd/pte entries branch to 20:, which enters the
 * full C fault path through handle_exception.  The trailing
 * "! nn XX" comments are instruction/latency annotations kept for
 * pipeline scheduling.
 */
	.balign 	1024,0,1024
tlb_miss:
#ifdef COUNT_EXCEPTIONS
	! Increment the counts
	mov.l	9f, k1
	mov.l	@k1, k2
	add	#1, k2
	mov.l	k2, @k1
#endif

	! k0 scratch
	! k1 pgd and pte pointers
	! k2 faulting address
	! k3 pgd and pte index masks
	! k4 shift

	! Load up the pgd entry (k1)
	ldmmupteh(k0)			!  9 LS (latency=2)	MMU_PTEH
	mov.w	4f, k3			!  8 LS (latency=2)	(PTRS_PER_PGD-1) << 2
	mov	#-(PGDIR_SHIFT-2), k4	!  6 EX
	mov.l	@(MMU_TEA-MMU_PTEH,k0), k2	! 18 LS (latency=2)
	mov.l	@(MMU_TTB-MMU_PTEH,k0), k1	! 18 LS (latency=2)
	mov	k2, k0			!  5 MT (latency=0)
	shld	k4, k0			! 99 EX
	and	k3, k0			! 78 EX
	mov.l	@(k0, k1), k1		! 21 LS (latency=2)
	mov	#-(PAGE_SHIFT-2), k4	!  6 EX

	! Load up the pte entry (k2)
	mov	k2, k0			!  5 MT (latency=0)
	shld	k4, k0			! 99 EX
	tst	k1, k1			! 86 MT	pgd entry == 0?
	bt	20f			! 110 BR	yes: take the C fault path
	and	k3, k0			! 78 EX
	mov.w	5f, k4			!  8 LS (latency=2)	_PAGE_PRESENT
	mov.l	@(k0, k1), k2		! 21 LS (latency=2)
	add	k0, k1			! 49 EX	k1 = &pte (for write-back at 10:)

#ifdef CONFIG_CPU_HAS_PTEA
	! Test the entry for present and _PAGE_ACCESSED
	mov	#-28, k3		!  6 EX
	mov	k2, k0			!  5 MT (latency=0)
	tst	k4, k2			! 68 MT	present bit clear?
	shld	k3, k0			! 99 EX
	bt	20f			! 110 BR	not present: C fault path

	! Set PTEA register
	! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
	!
	! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
	and	#0xe, k0		! 79 EX
	mov	k0, k3			!  5 MT (latency=0)
	mov	k2, k0			!  5 MT (latency=0)
	and	#1, k0			! 79 EX
	or	k0, k3			! 82 EX
	ldmmupteh(k0)			!  9 LS (latency=2)
	shll2	k4			! 101 EX		_PAGE_ACCESSED
	tst	k4, k2			! 68 MT	accessed bit clear?
	mov.l	k3, @(MMU_PTEA-MMU_PTEH,k0)	! 27 LS
	mov.l	7f, k3			!  9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
#else
	! Test the entry for present and _PAGE_ACCESSED
	mov.l	7f, k3			!  9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
	tst	k4, k2			! 68 MT	present bit clear?
	shll2	k4			! 101 EX		_PAGE_ACCESSED
	ldmmupteh(k0)			!  9 LS (latency=2)
	bt	20f			! 110 BR	not present: C fault path
	tst	k4, k2			! 68 MT	accessed bit clear?
	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
#endif

	! Set up the entry
	and	k2, k3			! 78 EX	keep hardware pte bits only
	bt/s	10f			! 108 BR	accessed bit not yet set
	 mov.l	k3, @(MMU_PTEL-MMU_PTEH,k0)	! 27 LS (delay slot)
	ldtlb				! 128 CO
	! At least one instruction between ldtlb and rte
	nop				! 119 NOP
	rte				! 126 CO
	 nop				! 119 NOP

10:	or	k4, k2			! 82 EX	set _PAGE_ACCESSED in the pte
	ldtlb				! 128 CO
	! At least one instruction between ldtlb and rte
	mov.l	k2, @k1			! 27 LS	write the pte back
	rte				! 126 CO
	! Note we cannot execute mov here, because it is executed after
	! restoring SSR, so would be executed in user space.
	 nop				! 119 NOP

	.align 5
	! One cache line if possible...
	! NOTE(review): 1: (swapper_pg_dir) appears unreferenced here —
	! 20: below resolves 1f/4f to the pool after `interrupt`.
1:	.long	swapper_pg_dir
4:	.short	(PTRS_PER_PGD-1) << 2
5:	.short	_PAGE_PRESENT
7:	.long	_PAGE_FLAGS_HARDWARE_MASK
8:	.long	MMU_PTEH
#ifdef	COUNT_EXCEPTIONS
9:	.long	exception_count_miss
#endif

	! Either pgd or pte not present: enter the generic fault path
	! (1f/4f resolve forward into the literal pool after `interrupt`).
20:	mov.l	1f, k2
	mov.l	4f, k3
	bra	handle_exception
	 mov.l	@k2, k2			! delay slot: k2 = *EXPEVT
!
! Hardware interrupt entry.  Loads the INTEVT register address (k2)
! and the ret_from_irq return address (k3), then enters
! handle_exception with k2 = -1 as the "this is an interrupt" marker;
! INTEVT itself is read later in do_IRQ().  The 1:/4: pool entries
! also serve the 20: slow path of tlb_miss above.
!
	.balign 	512,0,512
interrupt:
	mov.l	2f, k2
	mov.l	3f, k3
#if defined(CONFIG_KGDB_NMI)
	! Debounce (filter nested NMI)
	mov.l	@k2, k0
	mov.l	5f, k1
	cmp/eq	k1, k0			! is this the NMI vector?
	bf	0f			! no: normal interrupt handling
	mov.l	6f, k1
	tas.b	@k1			! atomically test-and-set in_nmi
	bt	0f			! was clear: handle this NMI
	rte				! already in an NMI: drop the nested one
	 nop
	.align	2
5:	.long	NMI_VEC
6:	.long	in_nmi
0:
#endif /* defined(CONFIG_KGDB_NMI) */
	bra	handle_exception
	 mov	#-1, k2		! interrupt exception marker (delay slot)

	.align	2
1:	.long	EXPEVT
2:	.long	INTEVT
3:	.long	ret_from_irq
4:	.long	ret_from_exception
!
!
	.align	2
/*
 * Common exception entry.  Entered with SR.RB=1 (bank-1 registers
 * active) and:
 *	k2 = exception code (EXPEVT value), or -1 for interrupts
 *	k3 = address the C handler should return to
 * Switches to the task's kernel stack when coming from user mode,
 * builds the register frame described in the file header, switches
 * back to register bank 0 with interrupts masked, then dispatches
 * through exception_handling_table (or to do_IRQ for the k2 == -1
 * interrupt case).
 */
ENTRY(handle_exception)
	! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
	! save all registers onto stack.
	!
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0 (delay slot)

	/* User space to kernel: switch onto this task's kernel stack */
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1	! k1 = thread_info + THREAD_SIZE = stack top
	mov	k1, r15		! change to kernel stack
	!
1:	mov.l	2f, k1		! k1 = SR bits to OR in below (FD=1, IMASK=15)
	!
#ifdef CONFIG_SH_DSP
	mov.l	r2, @-r15	! Save r2, we need another reg
	stc	sr, k4
	mov.l	1f, r2		! 1f: DSP bit (0x00001000)
	tst	r2, k4		! Check if in DSP mode
	mov.l	@r15+, r2	! Restore r2 now
	bt/s	skip_save
	 mov	#0, k4		! Set marker for no stack frame (delay slot)

	mov	r2, k4		! Backup r2 (in k4) for later

	! Save DSP registers on stack
	stc.l	mod, @-r15
	stc.l	re, @-r15
	stc.l	rs, @-r15
	sts.l	dsr, @-r15
	sts.l	y1, @-r15
	sts.l	y0, @-r15
	sts.l	x1, @-r15
	sts.l	x0, @-r15
	sts.l	a0, @-r15

	! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
	! FIXME: Make sure that this is still the case with newer toolchains,
	! as we're not at all interested in supporting ancient toolchains at
	! this point. -- PFM.
	mov	r15, r2
	.word	0xf653		! movs.l	a1, @-r2
	.word	0xf6f3		! movs.l	a0g, @-r2
	.word	0xf6d3		! movs.l	a1g, @-r2
	.word	0xf6c3		! movs.l	m0, @-r2
	.word	0xf6e3		! movs.l	m1, @-r2
	mov	r2, r15

	mov	k4, r2		! Restore r2
	mov.l	1f, k4		! Force DSP stack frame marker
skip_save:
	mov.l	k4, @-r15	! Push DSP mode marker onto stack
#endif
	! Save the user registers on the stack.
	mov.l	k2, @-r15	! EXPEVT (or -1 for interrupts)

	mov	#-1, k4
	mov.l	k4, @-r15	! set TRA (default: -1)
	!
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	sts.l	pr, @-r15
	stc.l	spc, @-r15
	!
	lds	k3, pr		! Set the return address to pr
	!
	mov.l	k0, @-r15	! save original stack
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15
	!
	stc	sr, r8		! Back to normal register bank, and
	or	k1, r8		! Block all interrupts (FD=1, IMASK=15)
	mov.l	3f, k1		! 3f: 0xcfffffff clears RB and BL
	and	k1, r8		! ...
	ldc	r8, sr		! ...changed here.
	!
	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	mov.l	r0, @-r15

	/*
	 * This gets a bit tricky.. in the INTEVT case we don't want to use
	 * the VBR offset as a destination in the jump call table, since all
	 * of the destinations are the same. In this case, (interrupt) sets
	 * a marker in r2 (now r2_bank since SR.RB changed), which we check
	 * to determine the exception type. For all other exceptions, we
	 * forcibly read EXPEVT from memory and fix up the jump address, in
	 * the interrupt exception case we jump to do_IRQ() and defer the
	 * INTEVT read until there. As a bonus, we can also clean up the SR.RB
	 * checks that do_IRQ() was doing..
	 */
	stc	r2_bank, r8	! r8 = exception code, or -1 marker
	cmp/pz	r8
	bf	interrupt_exception	! negative => interrupt marker
	shlr2	r8
	shlr	r8		! code >> 3: long-word index into the table

#ifdef COUNT_EXCEPTIONS
	mov.l	5f, r9
	add	r8, r9
	mov.l	@r9, r10
	add	#1, r10
	mov.l	r10, @r9	! bump the per-exception counter
#endif

	mov.l	4f, r9
	add	r8, r9
	mov.l	@r9, r9
	jmp	@r9		! dispatch; handler returns via pr (= k3)
	 nop
	rts			! never reached (jmp does not return here)
	 nop

	.align	2
1:	.long	0x00001000	! DSP=1
2:	.long	0x000080f0	! FD=1, IMASK=15
3:	.long	0xcfffffff	! RB=0, BL=0
4:	.long	exception_handling_table
#ifdef COUNT_EXCEPTIONS
5:	.long	exception_count_table
#endif
/*
 * Interrupt path split off from handle_exception: tail-jump to
 * do_IRQ(), which reads INTEVT itself and returns through pr
 * (set to ret_from_irq at exception entry).
 */
interrupt_exception:
	mov.l	1f, r9
	jmp	@r9
	 nop
	rts			! never reached (jmp does not return here)
	 nop

	.align	2
1:	.long	do_IRQ
	.align	2
/*
 * No-op handler — returns immediately; presumably installed for
 * unused exception-table slots (TODO: confirm against the table
 * setup code).
 */
ENTRY(exception_none)
	rts
	 nop