! arch/sh/kernel/entry.S
  1. /*
  2. * arch/sh/kernel/entry.S
  3. *
  4. * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
  5. * Copyright (C) 2003 - 2006 Paul Mundt
  6. *
  7. * This file is subject to the terms and conditions of the GNU General Public
  8. * License. See the file "COPYING" in the main directory of this archive
  9. * for more details.
  10. */
  11. #include <linux/sys.h>
  12. #include <linux/errno.h>
  13. #include <linux/linkage.h>
  14. #include <asm/asm-offsets.h>
  15. #include <asm/thread_info.h>
  16. #include <asm/unistd.h>
  17. #include <asm/cpu/mmu_context.h>
  18. #include <asm/pgtable.h>
  19. #include <asm/page.h>
  20. ! NOTE:
  21. ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
  22. ! to be jumped is too far, but it causes illegal slot exception.
  23. /*
  24. * entry.S contains the system-call and fault low-level handling routines.
  25. * This also contains the timer-interrupt handler, as well as all interrupts
  26. * and faults that can result in a task-switch.
  27. *
  28. * NOTE: This code handles signal-recognition, which happens every time
  29. * after a timer-interrupt and after each system call.
  30. *
  31. * NOTE: This code uses a convention that instructions in the delay slot
  32. * of a transfer-control instruction are indented by an extra space, thus:
  33. *
  34. * jmp @k0 ! control-transfer instruction
  35. * ldc k1, ssr ! delay slot
  36. *
  37. * Stack layout in 'ret_from_syscall':
  38. * ptrace needs to have all regs on the stack.
  39. * if the order here is changed, it needs to be
  40. * updated in ptrace.c and ptrace.h
  41. *
  42. * r0
  43. * ...
  44. * r15 = stack pointer
  45. * spc
  46. * pr
  47. * ssr
  48. * gbr
  49. * mach
  50. * macl
  51. * syscall #
  52. *
  53. */
  54. #if defined(CONFIG_KGDB_NMI)
  55. NMI_VEC = 0x1c0 ! Must catch early for debounce
  56. #endif
  57. /* Offsets to the stack */
  58. OFF_R0 = 0 /* Return value. New ABI also arg4 */
  59. OFF_R1 = 4 /* New ABI: arg5 */
  60. OFF_R2 = 8 /* New ABI: arg6 */
  61. OFF_R3 = 12 /* New ABI: syscall_nr */
  62. OFF_R4 = 16 /* New ABI: arg0 */
  63. OFF_R5 = 20 /* New ABI: arg1 */
  64. OFF_R6 = 24 /* New ABI: arg2 */
  65. OFF_R7 = 28 /* New ABI: arg3 */
  66. OFF_SP = (15*4)
  67. OFF_PC = (16*4)
  68. OFF_SR = (16*4+8)
  69. OFF_TRA = (16*4+6*4)
  70. #define k0 r0
  71. #define k1 r1
  72. #define k2 r2
  73. #define k3 r3
  74. #define k4 r4
  75. #define g_imask r6 /* r6_bank1 */
  76. #define k_g_imask r6_bank /* r6_bank1 */
  77. #define current r7 /* r7_bank1 */
  78. #include <asm/entry-macros.S>
  79. /*
  80. * Kernel mode register usage:
  81. * k0 scratch
  82. * k1 scratch
  83. * k2 scratch (Exception code)
  84. * k3 scratch (Return address)
  85. * k4 scratch
  86. * k5 reserved
  87. * k6 Global Interrupt Mask (0--15 << 4)
  88. * k7 CURRENT_THREAD_INFO (pointer to current thread info)
  89. */
  90. !
  91. ! TLB Miss / Initial Page write exception handling
  92. ! _and_
  93. ! TLB hits, but the access violate the protection.
  94. ! It can be valid access, such as stack grow and/or C-O-W.
  95. !
  96. !
  97. ! Find the pmd/pte entry and loadtlb
  98. ! If it's not found, cause address error (SEGV)
  99. !
  100. ! Although this could be written in assembly language (and it'd be faster),
  101. ! this first version depends *much* on C implementation.
  102. !
  103. #if defined(CONFIG_MMU)
  104. .align 2
  105. ENTRY(tlb_miss_load)
  106. bra call_dpf
  107. mov #0, r5
  108. .align 2
  109. ENTRY(tlb_miss_store)
  110. bra call_dpf
  111. mov #1, r5
  112. .align 2
  113. ENTRY(initial_page_write)
  114. bra call_dpf
  115. mov #1, r5
  116. .align 2
  117. ENTRY(tlb_protection_violation_load)
  118. bra call_dpf
  119. mov #0, r5
  120. .align 2
  121. ENTRY(tlb_protection_violation_store)
  122. bra call_dpf
  123. mov #1, r5
  124. call_dpf:
  125. mov.l 1f, r0
  126. mov.l @r0, r6 ! address
  127. mov.l 3f, r0
  128. jmp @r0
  129. mov r15, r4 ! regs
  130. .align 2
  131. 1: .long MMU_TEA
  132. 3: .long do_page_fault
  133. .align 2
  134. ENTRY(address_error_load)
  135. bra call_dae
  136. mov #0,r5 ! writeaccess = 0
  137. .align 2
  138. ENTRY(address_error_store)
  139. bra call_dae
  140. mov #1,r5 ! writeaccess = 1
  141. .align 2
  142. call_dae:
  143. mov.l 1f, r0
  144. mov.l @r0, r6 ! address
  145. mov.l 2f, r0
  146. jmp @r0
  147. mov r15, r4 ! regs
  148. .align 2
  149. 1: .long MMU_TEA
  150. 2: .long do_address_error
  151. #endif /* CONFIG_MMU */
  152. #if defined(CONFIG_SH_STANDARD_BIOS)
  153. /* Unwind the stack and jmp to the debug entry */
  154. ENTRY(sh_bios_handler)
  155. mov.l @r15+, r0
  156. mov.l @r15+, r1
  157. mov.l @r15+, r2
  158. mov.l @r15+, r3
  159. mov.l @r15+, r4
  160. mov.l @r15+, r5
  161. mov.l @r15+, r6
  162. mov.l @r15+, r7
  163. stc sr, r8
  164. mov.l 1f, r9 ! BL =1, RB=1, IMASK=0x0F
  165. or r9, r8
  166. ldc r8, sr ! here, change the register bank
  167. mov.l @r15+, r8
  168. mov.l @r15+, r9
  169. mov.l @r15+, r10
  170. mov.l @r15+, r11
  171. mov.l @r15+, r12
  172. mov.l @r15+, r13
  173. mov.l @r15+, r14
  174. mov.l @r15+, k0
  175. ldc.l @r15+, spc
  176. lds.l @r15+, pr
  177. mov.l @r15+, k1
  178. ldc.l @r15+, gbr
  179. lds.l @r15+, mach
  180. lds.l @r15+, macl
  181. mov k0, r15
  182. !
  183. mov.l 2f, k0
  184. mov.l @k0, k0
  185. jmp @k0
  186. ldc k1, ssr
  187. .align 2
  188. 1: .long 0x300000f0
  189. 2: .long gdb_vbr_vector
  190. #endif /* CONFIG_SH_STANDARD_BIOS */
  191. restore_all:
  192. mov.l @r15+, r0
  193. mov.l @r15+, r1
  194. mov.l @r15+, r2
  195. mov.l @r15+, r3
  196. mov.l @r15+, r4
  197. mov.l @r15+, r5
  198. mov.l @r15+, r6
  199. mov.l @r15+, r7
  200. !
  201. stc sr, r8
  202. mov.l 7f, r9
  203. or r9, r8 ! BL =1, RB=1
  204. ldc r8, sr ! here, change the register bank
  205. !
  206. mov.l @r15+, r8
  207. mov.l @r15+, r9
  208. mov.l @r15+, r10
  209. mov.l @r15+, r11
  210. mov.l @r15+, r12
  211. mov.l @r15+, r13
  212. mov.l @r15+, r14
  213. mov.l @r15+, k4 ! original stack pointer
  214. ldc.l @r15+, spc
  215. lds.l @r15+, pr
  216. mov.l @r15+, k3 ! original SR
  217. ldc.l @r15+, gbr
  218. lds.l @r15+, mach
  219. lds.l @r15+, macl
  220. add #4, r15 ! Skip syscall number
  221. !
  222. #ifdef CONFIG_SH_DSP
  223. mov.l @r15+, k0 ! DSP mode marker
  224. mov.l 5f, k1
  225. cmp/eq k0, k1 ! Do we have a DSP stack frame?
  226. bf skip_restore
  227. stc sr, k0 ! Enable CPU DSP mode
  228. or k1, k0 ! (within kernel it may be disabled)
  229. ldc k0, sr
  230. mov r2, k0 ! Backup r2
  231. ! Restore DSP registers from stack
  232. mov r15, r2
  233. movs.l @r2+, a1
  234. movs.l @r2+, a0g
  235. movs.l @r2+, a1g
  236. movs.l @r2+, m0
  237. movs.l @r2+, m1
  238. mov r2, r15
  239. lds.l @r15+, a0
  240. lds.l @r15+, x0
  241. lds.l @r15+, x1
  242. lds.l @r15+, y0
  243. lds.l @r15+, y1
  244. lds.l @r15+, dsr
  245. ldc.l @r15+, rs
  246. ldc.l @r15+, re
  247. ldc.l @r15+, mod
  248. mov k0, r2 ! Restore r2
  249. skip_restore:
  250. #endif
  251. !
  252. ! Calculate new SR value
  253. mov k3, k2 ! original SR value
  254. mov #0xf0, k1
  255. extu.b k1, k1
  256. not k1, k1
  257. and k1, k2 ! Mask orignal SR value
  258. !
  259. mov k3, k0 ! Calculate IMASK-bits
  260. shlr2 k0
  261. and #0x3c, k0
  262. cmp/eq #0x3c, k0
  263. bt/s 6f
  264. shll2 k0
  265. mov g_imask, k0
  266. !
  267. 6: or k0, k2 ! Set the IMASK-bits
  268. ldc k2, ssr
  269. !
  270. #if defined(CONFIG_KGDB_NMI)
  271. ! Clear in_nmi
  272. mov.l 6f, k0
  273. mov #0, k1
  274. mov.b k1, @k0
  275. #endif
  276. mov.l @r15+, k2 ! restore EXPEVT
  277. mov k4, r15
  278. rte
  279. nop
  280. .align 2
  281. 5: .long 0x00001000 ! DSP
  282. 7: .long 0x30000000
  283. ! common exception handler
  284. #include "../../entry-common.S"
  285. ! Exception Vector Base
  286. !
  287. ! Should be aligned page boundary.
  288. !
  289. .balign 4096,0,4096
  290. ENTRY(vbr_base)
  291. .long 0
  292. !
  293. .balign 256,0,256
  294. general_exception:
  295. mov.l 1f, k2
  296. mov.l 2f, k3
  297. bra handle_exception
  298. mov.l @k2, k2
  299. .align 2
  300. 1: .long EXPEVT
  301. 2: .long ret_from_exception
  302. !
  303. !
  304. /* gas doesn't flag impossible values for mov #immediate as an error */
  305. #if (_PAGE_PRESENT >> 2) > 0x7f
  306. #error cannot load PAGE_PRESENT as an immediate
  307. #endif
  308. #if _PAGE_DIRTY > 0x7f
  309. #error cannot load PAGE_DIRTY as an immediate
  310. #endif
  311. #if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
  312. #error cannot derive PAGE_ACCESSED from PAGE_PRESENT
  313. #endif
  314. #if defined(CONFIG_CPU_SH4)
  315. #define ldmmupteh(r) mov.l 8f, r
  316. #else
  317. #define ldmmupteh(r) mov #MMU_PTEH, r
  318. #endif
  319. .balign 1024,0,1024
  320. tlb_miss:
  321. #ifdef COUNT_EXCEPTIONS
  322. ! Increment the counts
  323. mov.l 9f, k1
  324. mov.l @k1, k2
  325. add #1, k2
  326. mov.l k2, @k1
  327. #endif
  328. ! k0 scratch
  329. ! k1 pgd and pte pointers
  330. ! k2 faulting address
  331. ! k3 pgd and pte index masks
  332. ! k4 shift
  333. ! Load up the pgd entry (k1)
  334. ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH
  335. mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2
  336. mov #-(PGDIR_SHIFT-2), k4 ! 6 EX
  337. mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2)
  338. mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2)
  339. mov k2, k0 ! 5 MT (latency=0)
  340. shld k4, k0 ! 99 EX
  341. and k3, k0 ! 78 EX
  342. mov.l @(k0, k1), k1 ! 21 LS (latency=2)
  343. mov #-(PAGE_SHIFT-2), k4 ! 6 EX
  344. ! Load up the pte entry (k2)
  345. mov k2, k0 ! 5 MT (latency=0)
  346. shld k4, k0 ! 99 EX
  347. tst k1, k1 ! 86 MT
  348. bt 20f ! 110 BR
  349. mov.w 3f, k3 ! 8 LS (latency=2) (PTRS_PER_PTE-1) << 2
  350. and k3, k0 ! 78 EX
  351. mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT
  352. mov.l @(k0, k1), k2 ! 21 LS (latency=2)
  353. add k0, k1 ! 49 EX
  354. #ifdef CONFIG_CPU_HAS_PTEA
  355. ! Test the entry for present and _PAGE_ACCESSED
  356. mov #-28, k3 ! 6 EX
  357. mov k2, k0 ! 5 MT (latency=0)
  358. tst k4, k2 ! 68 MT
  359. shld k3, k0 ! 99 EX
  360. bt 20f ! 110 BR
  361. ! Set PTEA register
  362. ! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
  363. !
  364. ! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
  365. and #0xe, k0 ! 79 EX
  366. mov k0, k3 ! 5 MT (latency=0)
  367. mov k2, k0 ! 5 MT (latency=0)
  368. and #1, k0 ! 79 EX
  369. or k0, k3 ! 82 EX
  370. ldmmupteh(k0) ! 9 LS (latency=2)
  371. shll2 k4 ! 101 EX _PAGE_ACCESSED
  372. tst k4, k2 ! 68 MT
  373. mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS
  374. mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
  375. ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
  376. #else
  377. ! Test the entry for present and _PAGE_ACCESSED
  378. mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
  379. tst k4, k2 ! 68 MT
  380. shll2 k4 ! 101 EX _PAGE_ACCESSED
  381. ldmmupteh(k0) ! 9 LS (latency=2)
  382. bt 20f ! 110 BR
  383. tst k4, k2 ! 68 MT
  384. ! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
  385. #endif
  386. ! Set up the entry
  387. and k2, k3 ! 78 EX
  388. bt/s 10f ! 108 BR
  389. mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS
  390. ldtlb ! 128 CO
  391. ! At least one instruction between ldtlb and rte
  392. nop ! 119 NOP
  393. rte ! 126 CO
  394. nop ! 119 NOP
  395. 10: or k4, k2 ! 82 EX
  396. ldtlb ! 128 CO
  397. ! At least one instruction between ldtlb and rte
  398. mov.l k2, @k1 ! 27 LS
  399. rte ! 126 CO
  400. ! Note we cannot execute mov here, because it is executed after
  401. ! restoring SSR, so would be executed in user space.
  402. nop ! 119 NOP
  403. .align 5
  404. ! Once cache line if possible...
  405. 1: .long swapper_pg_dir
  406. 3: .short (PTRS_PER_PTE-1) << 2
  407. 4: .short (PTRS_PER_PGD-1) << 2
  408. 5: .long _PAGE_PRESENT
  409. 7: .long _PAGE_FLAGS_HARDWARE_MASK
  410. 8: .long MMU_PTEH
  411. #ifdef COUNT_EXCEPTIONS
  412. 9: .long exception_count_miss
  413. #endif
  414. ! Either pgd or pte not present
  415. 20: mov.l 1f, k2
  416. mov.l 4f, k3
  417. bra handle_exception
  418. mov.l @k2, k2
  419. !
  420. .balign 512,0,512
  421. interrupt:
  422. mov.l 2f, k2
  423. mov.l 3f, k3
  424. #if defined(CONFIG_KGDB_NMI)
  425. ! Debounce (filter nested NMI)
  426. mov.l @k2, k0
  427. mov.l 5f, k1
  428. cmp/eq k1, k0
  429. bf 0f
  430. mov.l 6f, k1
  431. tas.b @k1
  432. bt 0f
  433. rte
  434. nop
  435. .align 2
  436. 5: .long NMI_VEC
  437. 6: .long in_nmi
  438. 0:
  439. #endif /* defined(CONFIG_KGDB_NMI) */
  440. bra handle_exception
  441. mov #-1, k2 ! interrupt exception marker
  442. .align 2
  443. 1: .long EXPEVT
  444. 2: .long INTEVT
  445. 3: .long ret_from_irq
  446. 4: .long ret_from_exception
  447. !
  448. !
  449. .align 2
  450. ENTRY(handle_exception)
  451. ! Using k0, k1 for scratch registers (r0_bank1, r1_bank),
  452. ! save all registers onto stack.
  453. !
  454. stc ssr, k0 ! Is it from kernel space?
  455. shll k0 ! Check MD bit (bit30) by shifting it into...
  456. shll k0 ! ...the T bit
  457. bt/s 1f ! It's a kernel to kernel transition.
  458. mov r15, k0 ! save original stack to k0
  459. /* User space to kernel */
  460. mov #(THREAD_SIZE >> 10), k1
  461. shll8 k1 ! k1 := THREAD_SIZE
  462. shll2 k1
  463. add current, k1
  464. mov k1, r15 ! change to kernel stack
  465. !
  466. 1: mov.l 2f, k1
  467. !
  468. #ifdef CONFIG_SH_DSP
  469. mov.l r2, @-r15 ! Save r2, we need another reg
  470. stc sr, k4
  471. mov.l 1f, r2
  472. tst r2, k4 ! Check if in DSP mode
  473. mov.l @r15+, r2 ! Restore r2 now
  474. bt/s skip_save
  475. mov #0, k4 ! Set marker for no stack frame
  476. mov r2, k4 ! Backup r2 (in k4) for later
  477. ! Save DSP registers on stack
  478. stc.l mod, @-r15
  479. stc.l re, @-r15
  480. stc.l rs, @-r15
  481. sts.l dsr, @-r15
  482. sts.l y1, @-r15
  483. sts.l y0, @-r15
  484. sts.l x1, @-r15
  485. sts.l x0, @-r15
  486. sts.l a0, @-r15
  487. ! GAS is broken, does not generate correct "movs.l Ds,@-As" instr.
  488. ! FIXME: Make sure that this is still the case with newer toolchains,
  489. ! as we're not at all interested in supporting ancient toolchains at
  490. ! this point. -- PFM.
  491. mov r15, r2
  492. .word 0xf653 ! movs.l a1, @-r2
  493. .word 0xf6f3 ! movs.l a0g, @-r2
  494. .word 0xf6d3 ! movs.l a1g, @-r2
  495. .word 0xf6c3 ! movs.l m0, @-r2
  496. .word 0xf6e3 ! movs.l m1, @-r2
  497. mov r2, r15
  498. mov k4, r2 ! Restore r2
  499. mov.l 1f, k4 ! Force DSP stack frame
  500. skip_save:
  501. mov.l k4, @-r15 ! Push DSP mode marker onto stack
  502. #endif
  503. ! Save the user registers on the stack.
  504. mov.l k2, @-r15 ! EXPEVT
  505. mov #-1, k4
  506. mov.l k4, @-r15 ! set TRA (default: -1)
  507. !
  508. sts.l macl, @-r15
  509. sts.l mach, @-r15
  510. stc.l gbr, @-r15
  511. stc.l ssr, @-r15
  512. sts.l pr, @-r15
  513. stc.l spc, @-r15
  514. !
  515. lds k3, pr ! Set the return address to pr
  516. !
  517. mov.l k0, @-r15 ! save orignal stack
  518. mov.l r14, @-r15
  519. mov.l r13, @-r15
  520. mov.l r12, @-r15
  521. mov.l r11, @-r15
  522. mov.l r10, @-r15
  523. mov.l r9, @-r15
  524. mov.l r8, @-r15
  525. !
  526. stc sr, r8 ! Back to normal register bank, and
  527. or k1, r8 ! Block all interrupts
  528. mov.l 3f, k1
  529. and k1, r8 ! ...
  530. ldc r8, sr ! ...changed here.
  531. !
  532. mov.l r7, @-r15
  533. mov.l r6, @-r15
  534. mov.l r5, @-r15
  535. mov.l r4, @-r15
  536. mov.l r3, @-r15
  537. mov.l r2, @-r15
  538. mov.l r1, @-r15
  539. mov.l r0, @-r15
  540. /*
  541. * This gets a bit tricky.. in the INTEVT case we don't want to use
  542. * the VBR offset as a destination in the jump call table, since all
  543. * of the destinations are the same. In this case, (interrupt) sets
  544. * a marker in r2 (now r2_bank since SR.RB changed), which we check
  545. * to determine the exception type. For all other exceptions, we
  546. * forcibly read EXPEVT from memory and fix up the jump address, in
  547. * the interrupt exception case we jump to do_IRQ() and defer the
  548. * INTEVT read until there. As a bonus, we can also clean up the SR.RB
  549. * checks that do_IRQ() was doing..
  550. */
  551. stc r2_bank, r8
  552. cmp/pz r8
  553. bf interrupt_exception
  554. shlr2 r8
  555. shlr r8
  556. #ifdef COUNT_EXCEPTIONS
  557. mov.l 5f, r9
  558. add r8, r9
  559. mov.l @r9, r10
  560. add #1, r10
  561. mov.l r10, @r9
  562. #endif
  563. mov.l 4f, r9
  564. add r8, r9
  565. mov.l @r9, r9
  566. jmp @r9
  567. nop
  568. rts
  569. nop
  570. .align 2
  571. 1: .long 0x00001000 ! DSP=1
  572. 2: .long 0x000080f0 ! FD=1, IMASK=15
  573. 3: .long 0xcfffffff ! RB=0, BL=0
  574. 4: .long exception_handling_table
  575. #ifdef COUNT_EXCEPTIONS
  576. 5: .long exception_count_table
  577. #endif
  578. interrupt_exception:
  579. mov.l 1f, r9
  580. jmp @r9
  581. nop
  582. rts
  583. nop
  584. .align 2
  585. 1: .long do_IRQ
  586. .align 2
  587. ENTRY(exception_none)
  588. rts
  589. nop