/*
 * arch/sh/kernel/cpu/sh3/entry.S
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2003 - 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sys.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <cpu/mmu_context.h>
#include <asm/page.h>
#include <asm/cache.h>

! NOTE:
! GNU as (as of 2.9.1) changes bf/s into bt/s and bra when the branch
! target is too far away, but this causes an illegal slot exception.

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * NOTE: This code uses a convention that instructions in the delay slot
 * of a transfer-control instruction are indented by an extra space, thus:
 *
 *	jmp	@k0		! control-transfer instruction
 *	 ldc	k1, ssr		! delay slot
 *
 * Stack layout in 'ret_from_syscall':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in ptrace.c and ptrace.h
 *
 *	r0
 *	...
 *	r15 = stack pointer
 *	spc
 *	pr
 *	ssr
 *	gbr
 *	mach
 *	macl
 *	syscall #
 *
 */

/* Offsets to the stack */
OFF_R0	= 0		/* Return value. New ABI also arg4 */
OFF_R1	= 4		/* New ABI: arg5 */
OFF_R2	= 8		/* New ABI: arg6 */
OFF_R3	= 12		/* New ABI: syscall_nr */
OFF_R4	= 16		/* New ABI: arg0 */
OFF_R5	= 20		/* New ABI: arg1 */
OFF_R6	= 24		/* New ABI: arg2 */
OFF_R7	= 28		/* New ABI: arg3 */
OFF_SP	= (15*4)
OFF_PC	= (16*4)
OFF_SR	= (16*4+8)
OFF_TRA	= (16*4+6*4)
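
/*
 * For reference, the layout above is how the C side reads the saved
 * context back out. A minimal sketch, assuming the usual 32-bit SH
 * pt_regs definition from <asm/ptrace.h> (check the tree in use before
 * relying on the exact field names):
 *
 *	struct pt_regs {
 *		unsigned long regs[16];	// r0..r15, regs[15] == OFF_SP
 *		unsigned long pc;	// OFF_PC, saved from spc
 *		unsigned long pr;
 *		unsigned long sr;	// OFF_SR, saved from ssr
 *		unsigned long gbr;
 *		unsigned long mach;
 *		unsigned long macl;
 *		long tra;		// OFF_TRA, syscall #
 *	};
 *
 * so OFF_SR = 16*4 + 8 is simply offsetof(struct pt_regs, sr), and the
 * OFF_Rn values are offsetof(struct pt_regs, regs[n]).
 */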

#define k0	r0
#define k1	r1
#define k2	r2
#define k3	r3
#define k4	r4

#define g_imask		r6	/* r6_bank1 */
#define k_g_imask	r6_bank	/* r6_bank1 */
#define current		r7	/* r7_bank1 */

#include <asm/entry-macros.S>

/*
 * Kernel mode register usage:
 *	k0	scratch
 *	k1	scratch
 *	k2	scratch (Exception code)
 *	k3	scratch (Return address)
 *	k4	scratch
 *	k5	reserved
 *	k6	Global Interrupt Mask (0--15 << 4)
 *	k7	CURRENT_THREAD_INFO (pointer to current thread info)
 */

!
! TLB Miss / Initial Page write exception handling
!			_and_
! TLB hits, but the access violates the protection.
!	It can be a valid access, such as stack growth and/or C-O-W.
!
!
! Find the pmd/pte entry and load the TLB.
! If it's not found, cause an address error (SEGV).
!
! Although this could be written in assembly language (and it'd be faster),
! this first version relies heavily on the C implementation.
!
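!
! The C entry points called below all take the same three arguments in
! the same registers: r4 = saved pt_regs, r5 = error/write code, and
! r6 = the faulting address read from MMU_TEA. A hedged sketch of the
! expected prototypes (the exact signatures live under arch/sh/mm and
! may differ slightly between kernel versions):
!
!	asmlinkage int handle_tlbmiss(struct pt_regs *regs,
!				      unsigned long error_code,
!				      unsigned long address);
!	asmlinkage void do_page_fault(struct pt_regs *regs,
!				      unsigned long error_code,
!				      unsigned long address);
!
! handle_tlbmiss() returning non-zero is treated below as "not a simple
! TLB reload", and the slow path falls through into do_page_fault().
!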
#if defined(CONFIG_MMU)
	.align	2
ENTRY(tlb_miss_load)
	bra	call_handle_tlbmiss
	 mov	#0, r5

	.align	2
ENTRY(tlb_miss_store)
	bra	call_handle_tlbmiss
	 mov	#1, r5

	.align	2
ENTRY(initial_page_write)
	bra	call_handle_tlbmiss
	 mov	#2, r5

	.align	2
ENTRY(tlb_protection_violation_load)
	bra	call_do_page_fault
	 mov	#0, r5

	.align	2
ENTRY(tlb_protection_violation_store)
	bra	call_do_page_fault
	 mov	#1, r5

call_handle_tlbmiss:
	setup_frame_reg
	mov.l	1f, r0
	mov	r5, r8
	mov.l	@r0, r6
	mov.l	2f, r0
	sts	pr, r10
	jsr	@r0
	 mov	r15, r4
	!
	tst	r0, r0
	bf/s	0f
	 lds	r10, pr
	rts
	 nop
0:
	mov	r8, r5
call_do_page_fault:
	mov.l	1f, r0
	mov.l	@r0, r6
	mov.l	3f, r0
	mov.l	4f, r1
	mov	r15, r4
	jmp	@r0
	 lds	r1, pr
	.align	2
1:	.long	MMU_TEA
2:	.long	handle_tlbmiss
3:	.long	do_page_fault
4:	.long	ret_from_exception

	.align	2
ENTRY(address_error_load)
	bra	call_dae
	 mov	#0,r5		! writeaccess = 0

	.align	2
ENTRY(address_error_store)
	bra	call_dae
	 mov	#1,r5		! writeaccess = 1

	.align	2
call_dae:
	mov.l	1f, r0
	mov.l	@r0, r6		! address
	mov.l	2f, r0
	jmp	@r0
	 mov	r15, r4		! regs

	.align	2
1:	.long	MMU_TEA
2:	.long	do_address_error
#endif /* CONFIG_MMU */

#if defined(CONFIG_SH_STANDARD_BIOS)
	/* Unwind the stack and jmp to the debug entry */
ENTRY(sh_bios_handler)
	mov.l	1f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr			! restore pr
	mov	k4, r15
	!
	mov.l	2f, k0
	mov.l	@k0, k0
	jmp	@k0
	 ldc	k3, ssr
	.align	2
1:	.long	0x300000f0
2:	.long	gdb_vbr_vector
#endif /* CONFIG_SH_STANDARD_BIOS */

! restore_regs()
! - restore r0, r1, r2, r3, r4, r5, r6, r7 from the stack
! - switch bank
! - restore r8, r9, r10, r11, r12, r13, r14, r15 from the stack
! - restore spc, pr*, ssr, gbr, mach, macl, skip default tra
! k2 returns original pr
! k3 returns original sr
! k4 returns original stack pointer
! r8 passes SR bitmask, overwritten with restored data on return
! r9 trashed
! BL=0 on entry, on exit BL=1 (depending on r8).

ENTRY(restore_regs)
	mov.l	@r15+, r0
	mov.l	@r15+, r1
	mov.l	@r15+, r2
	mov.l	@r15+, r3
	mov.l	@r15+, r4
	mov.l	@r15+, r5
	mov.l	@r15+, r6
	mov.l	@r15+, r7
	!
	stc	sr, r9
	or	r8, r9
	ldc	r9, sr
	!
	mov.l	@r15+, r8
	mov.l	@r15+, r9
	mov.l	@r15+, r10
	mov.l	@r15+, r11
	mov.l	@r15+, r12
	mov.l	@r15+, r13
	mov.l	@r15+, r14
	mov.l	@r15+, k4	! original stack pointer
	ldc.l	@r15+, spc
	mov.l	@r15+, k2	! original PR
	mov.l	@r15+, k3	! original SR
	ldc.l	@r15+, gbr
	lds.l	@r15+, mach
	lds.l	@r15+, macl
	rts
	 add	#4, r15		! Skip syscall number

restore_all:
	mov.l	7f, r8
	bsr	restore_regs
	 nop

	lds	k2, pr		! restore pr
	!
	! Calculate new SR value
	mov	k3, k2		! original SR value
	mov	#0xfffffff0, k1
	extu.b	k1, k1
	not	k1, k1
	and	k1, k2		! Mask original SR value
	!
	mov	k3, k0		! Calculate IMASK-bits
	shlr2	k0
	and	#0x3c, k0
	cmp/eq	#0x3c, k0
	bt/s	6f
	 shll2	k0
	mov	g_imask, k0
	!
6:	or	k0, k2		! Set the IMASK-bits
	ldc	k2, ssr
	!
	mov	k4, r15
	rte
	 nop

	.align	2
5:	.long	0x00001000	! DSP
7:	.long	0x30000000
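
!
! In C-like pseudocode, the SSR value computed above is roughly (a sketch
! of this block's logic only, not an interface used elsewhere):
!
!	new_ssr = orig_sr & ~0x000000f0;	/* clear IMASK */
!	if ((orig_sr & 0x000000f0) == 0x000000f0)
!		new_ssr |= 0x000000f0;		/* stay fully masked */
!	else
!		new_ssr |= g_imask;		/* restore the soft mask */
!
! i.e. a fully-masked SR is left alone, otherwise the globally tracked
! interrupt mask held in r6_bank is merged back in before the rte.
!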

! common exception handler
#include "../../entry-common.S"

! Exception Vector Base
!
!	Should be aligned on a page boundary.
!
	.balign	4096,0,4096
ENTRY(vbr_base)
	.long	0
!
! 0x100: General exception vector
!
	.balign	256,0,256
general_exception:
#ifndef CONFIG_CPU_SUBTYPE_SHX3
	bra	handle_exception
	 sts	pr, k3		! save original pr value in k3
#else
	mov.l	1f, k4
	mov.l	@k4, k4

	! Is EXPEVT larger than 0x800?
	mov	#0x8, k0
	shll8	k0
	cmp/hs	k0, k4
	bf	0f

	! then add 0x580 (k2 is 0xd80 or 0xda0)
	mov	#0x58, k0
	shll2	k0
	shll2	k0
	add	k0, k4
0:
	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 nop

	! Save registers / Switch to bank 0
	mov	k4, k2		! keep vector in k2
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 nop

	bra	handle_exception_special
	 nop

	.align	2
1:	.long	EXPEVT
#endif

! prepare_stack()
! - roll back gRB
! - switch to kernel stack
! k0 returns original sp (after roll back)
! k1 trashed
! k2 trashed

prepare_stack:
#ifdef CONFIG_GUSA
	! Check for roll back gRB (User and Kernel)
	mov	r15, k0
	shll	k0
	bf/s	1f
	 shll	k0
	bf/s	1f
	 stc	spc, k1
	stc	r0_bank, k0
	cmp/hs	k0, k1		! test k1 (saved PC) >= k0 (saved r0)
	bt/s	2f
	 stc	r1_bank, k1

	add	#-2, k0
	add	r15, k0
	ldc	k0, spc		! PC = saved r0 + r15 - 2
2:	mov	k1, r15		! SP = r1
1:
#endif
	! Switch to kernel stack if needed
	stc	ssr, k0		! Is it from kernel space?
	shll	k0		! Check MD bit (bit30) by shifting it into...
	shll	k0		!       ...the T bit
	bt/s	1f		! It's a kernel to kernel transition.
	 mov	r15, k0		! save original stack to k0

	/* User space to kernel */
	mov	#(THREAD_SIZE >> 10), k1
	shll8	k1		! k1 := THREAD_SIZE
	shll2	k1
	add	current, k1
	mov	k1, r15		! change to kernel stack
	!
1:
	rts
	 nop
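
!
! The stack selection above is, in C-like pseudocode (a sketch of this
! block only; "current" here is the thread_info pointer kept in r7_bank1,
! not the usual current task macro):
!
!	if (ssr & (1 << 30))		/* MD set: already in kernel mode */
!		sp = saved_r15;		/* keep the interrupted kernel stack */
!	else				/* coming from user space */
!		sp = (unsigned long)current_thread_info + THREAD_SIZE;
!
! i.e. a user-to-kernel transition starts from the top of the current
! thread's kernel stack, while kernel-to-kernel transitions continue on
! the stack that was already in use.
!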

!
! 0x400: Instruction and Data TLB miss exception vector
!
	.balign	1024,0,1024
tlb_miss:
	sts	pr, k3		! save original pr value in k3

handle_exception:
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	5f, k2		! vector register address
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov.l	@k2, k2		! read out vector and keep in k2

handle_exception_special:
	! Setup return address and jump to exception handler
	mov.l	7f, r9		! fetch return address
	stc	r2_bank, r0	! k2 (vector)
	mov.l	6f, r10
	shlr2	r0
	shlr	r0
	mov.l	@(r0, r10), r10
	jmp	@r10
	 lds	r9, pr		! put return address in pr
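
!
! The dispatch above is, in C-like pseudocode (a sketch, using the
! exception_handling_table referenced in the literal pool below):
!
!	void (*handler)(void) = exception_handling_table[expevt >> 5];
!	pr = ret_from_exception;
!	handler();
!
! EXPEVT codes are multiples of 0x20, so shifting the vector right by 3
! (shlr2 + shlr) turns it directly into a byte offset into the table of
! 4-byte function pointers.
!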

	.align	L1_CACHE_SHIFT

! save_regs()
! - save default tra, macl, mach, gbr, ssr, pr* and spc on the stack
! - save r15*, r14, r13, r12, r11, r10, r9, r8 on the stack
! - switch bank
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k0 contains original stack pointer*
! k1 trashed
! k3 passes original pr*
! k4 passes SR bitmask
! BL=1 on entry, on exit BL=0.

ENTRY(save_regs)
	mov	#-1, r1
	mov.l	k1, @-r15	! set TRA (default: -1)
	sts.l	macl, @-r15
	sts.l	mach, @-r15
	stc.l	gbr, @-r15
	stc.l	ssr, @-r15
	mov.l	k3, @-r15	! original pr in k3
	stc.l	spc, @-r15

	mov.l	k0, @-r15	! original stack pointer in k0
	mov.l	r14, @-r15
	mov.l	r13, @-r15
	mov.l	r12, @-r15
	mov.l	r11, @-r15
	mov.l	r10, @-r15
	mov.l	r9, @-r15
	mov.l	r8, @-r15

	mov.l	0f, k3		! SR bits to set in k3

	! fall-through

! save_low_regs()
! - modify SR for bank switch
! - save r7, r6, r5, r4, r3, r2, r1, r0 on the stack
! k3 passes bits to set in SR
! k4 passes bits to clear in SR

ENTRY(save_low_regs)
	stc	sr, r8
	or	k3, r8
	and	k4, r8
	ldc	r8, sr

	mov.l	r7, @-r15
	mov.l	r6, @-r15
	mov.l	r5, @-r15
	mov.l	r4, @-r15
	mov.l	r3, @-r15
	mov.l	r2, @-r15
	mov.l	r1, @-r15
	rts
	 mov.l	r0, @-r15

!
! 0x600: Interrupt / NMI vector
!
	.balign	512,0,512
ENTRY(handle_interrupt)
	sts	pr, k3		! save original pr value in k3
	mova	exception_data, k0

	! Setup stack and save DSP context (k0 contains original r15 on return)
	bsr	prepare_stack
	 PREF(k0)

	! Save registers / Switch to bank 0
	mov.l	1f, k4		! SR bits to clear in k4
	bsr	save_regs	! needs original pr value in k3
	 mov	#-1, k2		! default vector kept in k2

	setup_frame_reg

	stc	sr, r0		! get status register
	shlr2	r0
	and	#0x3c, r0
	cmp/eq	#0x3c, r0
	bf	9f
	TRACE_IRQS_OFF
9:
	! Setup return address and jump to do_IRQ
	mov.l	4f, r9		! fetch return address
	lds	r9, pr		! put return address in pr

	mov.l	2f, r4
	mov.l	3f, r9
	mov.l	@r4, r4		! pass INTEVT vector as arg0

	shlr2	r4
	shlr	r4
	mov	r4, r0		! save vector->jmp table offset for later

	shlr2	r4		! vector to IRQ# conversion
	add	#-0x10, r4

	cmp/pz	r4		! is it a valid IRQ?
	bt	10f

	/*
	 * We got here as a result of taking the INTEVT path for something
	 * that isn't a valid hard IRQ, therefore we bypass the do_IRQ()
	 * path and special case the event dispatch instead. This is the
	 * expected path for the NMI (and any other brilliantly implemented
	 * exception), which effectively wants regular exception dispatch
	 * but is unfortunately reported through INTEVT rather than
	 * EXPEVT. Grr.
	 */
	mov.l	6f, r9
	mov.l	@(r0, r9), r9
	jmp	@r9
	 mov	r15, r8		! trap handlers take saved regs in r8

10:
	jmp	@r9		! Off to do_IRQ() we go.
	 mov	r15, r5		! pass saved registers as arg1
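
!
! In C-like pseudocode, the dispatch above amounts to (a sketch only; the
! do_IRQ() prototype is assumed to be the SH one taking an IRQ number and
! pt_regs, and the conversion matches what is usually called evt2irq()):
!
!	unsigned int evt = INTEVT;
!	int irq = (evt >> 5) - 0x10;		/* vector to IRQ# conversion */
!
!	if (irq >= 0)
!		do_IRQ(irq, regs);		/* returns via ret_from_irq */
!	else
!		exception_handling_table[evt >> 5]();	/* e.g. the NMI handler */
!
! Note that in the non-IRQ case the handler is entered with the saved
! register frame in r8 rather than as a normal C argument.
!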

ENTRY(exception_none)
	rts
	 nop

	.align	L1_CACHE_SHIFT
exception_data:
0:	.long	0x000080f0	! FD=1, IMASK=15
1:	.long	0xcfffffff	! RB=0, BL=0
2:	.long	INTEVT
3:	.long	do_IRQ
4:	.long	ret_from_irq
5:	.long	EXPEVT
6:	.long	exception_handling_table
7:	.long	ret_from_exception