entry-avr32b.S

/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * This file contains the low-level entry-points into the kernel, that is,
 * exception handlers, debug trap handlers, interrupt handlers and the
 * system call handler.
 */
#include <linux/errno.h>

#include <asm/asm.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/ocd.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

#ifdef CONFIG_PREEMPT
# define preempt_stop           mask_interrupts
#else
# define preempt_stop
# define fault_resume_kernel    fault_restore_all
#endif

#define __MASK(x)       ((1 << (x)) - 1)
#define IRQ_MASK        ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
                         (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
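
/*
 * Exception vector table. The CPU enters at a fixed offset from EVBA
 * depending on the exception cause, so each vector below is just a
 * branch to the real handler. The TLB miss handlers and the system
 * call entry point live at their architecturally fixed offsets
 * (0x50, 0x60, 0x70 and 0x100) further down.
 */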
        .section .ex.text,"ax",@progbits
        .align  2
exception_vectors:
        bral    handle_critical
        .align  2
        bral    handle_critical
        .align  2
        bral    do_bus_error_write
        .align  2
        bral    do_bus_error_read
        .align  2
        bral    do_nmi_ll
        .align  2
        bral    handle_address_fault
        .align  2
        bral    handle_protection_fault
        .align  2
        bral    handle_debug
        .align  2
        bral    do_illegal_opcode_ll
        .align  2
        bral    do_illegal_opcode_ll
        .align  2
        bral    do_illegal_opcode_ll
        .align  2
        bral    do_fpe_ll
        .align  2
        bral    do_illegal_opcode_ll
        .align  2
        bral    handle_address_fault
        .align  2
        bral    handle_address_fault
        .align  2
        bral    handle_protection_fault
        .align  2
        bral    handle_protection_fault
        .align  2
        bral    do_dtlb_modified

/*
 * r0 : PGD/PT/PTE
 * r1 : Offending address
 * r2 : Scratch register
 * r3 : Cause (5, 12 or 13)
 */
#define tlbmiss_save    pushm   r0-r3
#define tlbmiss_restore popm    r0-r3
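
/*
 * Fast TLB refill. itlb_miss, dtlb_miss_read and dtlb_miss_write all
 * fall into tlb_miss_common, which walks the two-level page table
 * pointed to by PTBR (or swapper_pg_dir for vmalloc addresses), marks
 * the PTE accessed, picks a victim entry based on TLBARLO and writes
 * the new entry with tlbw. Anything it cannot handle is passed on to
 * the C page fault code via the slow path further down.
 */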
        .org    0x50
        .global itlb_miss
itlb_miss:
        tlbmiss_save
        rjmp    tlb_miss_common

        .org    0x60
dtlb_miss_read:
        tlbmiss_save
        rjmp    tlb_miss_common

        .org    0x70
dtlb_miss_write:
        tlbmiss_save

        .global tlb_miss_common
        .align  2
tlb_miss_common:
        mfsr    r0, SYSREG_TLBEAR
        mfsr    r1, SYSREG_PTBR

        /* Is it the vmalloc space? */
        bld     r0, 31
        brcs    handle_vmalloc_miss

        /* First level lookup */
pgtbl_lookup:
        lsr     r2, r0, PGDIR_SHIFT
        ld.w    r3, r1[r2 << 2]
        bfextu  r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
        bld     r3, _PAGE_BIT_PRESENT
        brcc    page_table_not_present

        /* Translate to virtual address in P1. */
        andl    r3, 0xf000
        sbr     r3, 31

        /* Second level lookup */
        ld.w    r2, r3[r1 << 2]
        mfsr    r0, SYSREG_TLBARLO
        bld     r2, _PAGE_BIT_PRESENT
        brcc    page_not_present

        /* Mark the page as accessed */
        sbr     r2, _PAGE_BIT_ACCESSED
        st.w    r3[r1 << 2], r2

        /* Drop software flags */
        andl    r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
        mtsr    SYSREG_TLBELO, r2

        /* Figure out which entry we want to replace */
        mfsr    r1, SYSREG_MMUCR
        clz     r2, r0
        brcc    1f
        mov     r3, -1                  /* All entries have been accessed, */
        mov     r2, 0                   /* so start at 0 */
        mtsr    SYSREG_TLBARLO, r3      /* and reset TLBAR */

1:      bfins   r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
        mtsr    SYSREG_MMUCR, r1
        tlbw

        tlbmiss_restore
        rete

handle_vmalloc_miss:
        /* Simply do the lookup in init's page table */
        mov     r1, lo(swapper_pg_dir)
        orh     r1, hi(swapper_pg_dir)
        rjmp    pgtbl_lookup

/* --- System Call --- */
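
/*
 * The system call number is passed in r8 and the return value comes
 * back in r12. The first arguments follow the normal C calling
 * convention in r12-r9; the fifth arrives in r5 and is moved into r8
 * before the handler is called, and the sixth is pushed on the stack
 * by the user space stub (see the comment on the mov r8, r5 below).
 */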
        .org    0x100
system_call:
#ifdef CONFIG_PREEMPT
        mask_interrupts
#endif
        pushm   r12             /* r12_orig */
        stmts   --sp, r0-lr

        mfsr    r0, SYSREG_RAR_SUP
        mfsr    r1, SYSREG_RSR_SUP
#ifdef CONFIG_PREEMPT
        unmask_interrupts
#endif
        zero_fp
        stm     --sp, r0-r1

        /* check for syscall tracing */
        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        bld     r1, TIF_SYSCALL_TRACE
        brcs    syscall_trace_enter

syscall_trace_cont:
        cp.w    r8, NR_syscalls
        brhs    syscall_badsys

        lddpc   lr, syscall_table_addr
        ld.w    lr, lr[r8 << 2]
        mov     r8, r5          /* 5th argument (6th is pushed by stub) */
        icall   lr

        .global syscall_return
syscall_return:
        get_thread_info r0
        mask_interrupts         /* make sure we don't miss an interrupt
                                   setting need_resched or sigpending
                                   between sampling and the rets */

        /* Store the return value so that the correct value is loaded below */
        stdsp   sp[REG_R12], r12

        ld.w    r1, r0[TI_flags]
        andl    r1, _TIF_ALLWORK_MASK, COH
        brne    syscall_exit_work

syscall_exit_cont:
        popm    r8-r9
        mtsr    SYSREG_RAR_SUP, r8
        mtsr    SYSREG_RSR_SUP, r9
        ldmts   sp++, r0-lr
        sub     sp, -4          /* r12_orig */
        rets

        .align  2
syscall_table_addr:
        .long   sys_call_table

syscall_badsys:
        mov     r12, -ENOSYS
        rjmp    syscall_return

        .global ret_from_fork
ret_from_fork:
        rcall   schedule_tail

        /* check for syscall tracing */
        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        andl    r1, _TIF_ALLWORK_MASK, COH
        brne    syscall_exit_work
        rjmp    syscall_exit_cont

syscall_trace_enter:
        pushm   r8-r12
        rcall   syscall_trace
        popm    r8-r12
        rjmp    syscall_trace_cont
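
/*
 * Slow syscall exit: deal with whatever work is pending in TI_flags
 * (syscall tracing, rescheduling, signal delivery, breakpoints) with
 * interrupts enabled, re-checking the flags after each step before
 * falling back into the common exit path.
 */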
syscall_exit_work:
        bld     r1, TIF_SYSCALL_TRACE
        brcc    1f
        unmask_interrupts
        rcall   syscall_trace
        mask_interrupts
        ld.w    r1, r0[TI_flags]

1:      bld     r1, TIF_NEED_RESCHED
        brcc    2f
        unmask_interrupts
        rcall   schedule
        mask_interrupts
        ld.w    r1, r0[TI_flags]
        rjmp    1b

2:      mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
        tst     r1, r2
        breq    3f
        unmask_interrupts
        mov     r12, sp
        mov     r11, r0
        rcall   do_notify_resume
        mask_interrupts
        ld.w    r1, r0[TI_flags]
        rjmp    1b

3:      bld     r1, TIF_BREAKPOINT
        brcc    syscall_exit_cont
        rjmp    enter_monitor_mode

/* The slow path of the TLB miss handler */
page_table_not_present:
page_not_present:
        tlbmiss_restore
        sub     sp, 4
        stmts   --sp, r0-lr
        rcall   save_full_context_ex
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        rcall   do_page_fault
        rjmp    ret_from_exception

/* This function expects to find offending PC in SYSREG_RAR_EX */
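/*
 * Common context save used by the exception handlers below: store
 * RAR_EX/RSR_EX as the PC and SR of the pt_regs frame, fix up the
 * saved SP if the exception came from kernel mode, and re-enable
 * exceptions before returning to the caller. The case where we were
 * entered through the debug trampoline is handled separately below.
 */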
        .type   save_full_context_ex, @function
        .align  2
save_full_context_ex:
        mfsr    r11, SYSREG_RAR_EX
        sub     r9, pc, . - debug_trampoline
        mfsr    r8, SYSREG_RSR_EX
        cp.w    r9, r11
        breq    3f
        mov     r12, r8
        andh    r8, (MODE_MASK >> 16), COH
        brne    2f

1:      pushm   r11, r12        /* PC and SR */
        unmask_exceptions
        ret     r12

2:      sub     r10, sp, -(FRAME_SIZE_FULL - REG_LR)
        stdsp   sp[4], r10      /* replace saved SP */
        rjmp    1b

        /*
         * The debug handler set up a trampoline to make us
         * automatically enter monitor mode upon return, but since
         * we're saving the full context, we must assume that the
         * exception handler might want to alter the return address
         * and/or status register. So we need to restore the original
         * context and enter monitor mode manually after the exception
         * has been handled.
         */
3:      get_thread_info r8
        ld.w    r11, r8[TI_rar_saved]
        ld.w    r12, r8[TI_rsr_saved]
        rjmp    1b
        .size   save_full_context_ex, . - save_full_context_ex

/* Low-level exception handlers */
handle_critical:
        sub     sp, 4
        stmts   --sp, r0-lr
        rcall   save_full_context_ex
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        rcall   do_critical_exception

        /* We should never get here... */
bad_return:
        sub     r12, pc, (. - 1f)
        bral    panic
        .align  2
1:      .asciz  "Return from critical exception!"
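
/*
 * Bus error handlers. r11 tells do_bus_error() whether the failing
 * access was a write (1) or a read (0); the faulting address is
 * fetched from BEAR.
 */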
        .align  1
do_bus_error_write:
        sub     sp, 4
        stmts   --sp, r0-lr
        rcall   save_full_context_ex
        mov     r11, 1
        rjmp    1f

do_bus_error_read:
        sub     sp, 4
        stmts   --sp, r0-lr
        rcall   save_full_context_ex
        mov     r11, 0

1:      mfsr    r12, SYSREG_BEAR
        mov     r10, sp
        rcall   do_bus_error
        rjmp    ret_from_exception
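
/*
 * NMI entry. NMIs have their own return registers (RAR_NMI/RSR_NMI)
 * and may interrupt any context, so the frame is built by hand here
 * and the restore path depends on whether user or kernel mode was
 * interrupted (the mode bits are kept in r0 across the call to
 * do_nmi).
 */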
        .align  1
do_nmi_ll:
        sub     sp, 4
        stmts   --sp, r0-lr
        mfsr    r9, SYSREG_RSR_NMI
        mfsr    r8, SYSREG_RAR_NMI
        bfextu  r0, r9, MODE_SHIFT, 3
        brne    2f

1:      pushm   r8, r9          /* PC and SR */
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        rcall   do_nmi
        popm    r8-r9
        mtsr    SYSREG_RAR_NMI, r8
        tst     r0, r0
        mtsr    SYSREG_RSR_NMI, r9
        brne    3f

        ldmts   sp++, r0-lr
        sub     sp, -4          /* skip r12_orig */
        rete

2:      sub     r10, sp, -(FRAME_SIZE_FULL - REG_LR)
        stdsp   sp[4], r10      /* replace saved SP */
        rjmp    1b

3:      popm    lr
        sub     sp, -4          /* skip sp */
        popm    r0-r12
        sub     sp, -4          /* skip r12_orig */
        rete

handle_address_fault:
        sub     sp, 4
        stmts   --sp, r0-lr
        rcall   save_full_context_ex
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        rcall   do_address_exception
        rjmp    ret_from_exception

handle_protection_fault:
        sub     sp, 4
        stmts   --sp, r0-lr
        rcall   save_full_context_ex
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        rcall   do_page_fault
        rjmp    ret_from_exception

        .align  1
do_illegal_opcode_ll:
        sub     sp, 4
        stmts   --sp, r0-lr
        rcall   save_full_context_ex
        mfsr    r12, SYSREG_ECR
        mov     r11, sp
        rcall   do_illegal_opcode
        rjmp    ret_from_exception
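
/*
 * DTLB modified exception: the first write to a clean page. Set the
 * dirty bit in the PTE and update the corresponding TLB entry to
 * match.
 */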
do_dtlb_modified:
        pushm   r0-r3
        mfsr    r1, SYSREG_TLBEAR
        mfsr    r0, SYSREG_PTBR
        lsr     r2, r1, PGDIR_SHIFT
        ld.w    r0, r0[r2 << 2]
        lsl     r1, (32 - PGDIR_SHIFT)
        lsr     r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT

        /* Translate to virtual address in P1 */
        andl    r0, 0xf000
        sbr     r0, 31
        add     r2, r0, r1 << 2

        ld.w    r3, r2[0]
        sbr     r3, _PAGE_BIT_DIRTY
        mov     r0, r3
        st.w    r2[0], r3

        /* The page table is up-to-date. Update the TLB entry as well */
        andl    r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
        mtsr    SYSREG_TLBELO, r0

        /* MMUCR[DRP] is updated automatically, so let's go... */
        tlbw

        popm    r0-r3
        rete

do_fpe_ll:
        sub     sp, 4
        stmts   --sp, r0-lr
        rcall   save_full_context_ex
        unmask_interrupts
        mov     r12, 26
        mov     r11, sp
        rcall   do_fpe
        rjmp    ret_from_exception
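
/*
 * Common exception return path: check which mode we are returning to
 * and whether any work (rescheduling, signals, breakpoints) is
 * pending before restoring the interrupted context via RAR_EX/RSR_EX.
 */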
ret_from_exception:
        mask_interrupts
        lddsp   r4, sp[REG_SR]

        andh    r4, (MODE_MASK >> 16), COH
        brne    fault_resume_kernel

        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        andl    r1, _TIF_WORK_MASK, COH
        brne    fault_exit_work

fault_resume_user:
        popm    r8-r9
        mask_exceptions
        mtsr    SYSREG_RAR_EX, r8
        mtsr    SYSREG_RSR_EX, r9
        ldmts   sp++, r0-lr
        sub     sp, -4
        rete
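
/*
 * Return to kernel mode. With CONFIG_PREEMPT, reschedule here if the
 * preempt count is zero, TIF_NEED_RESCHED is set and the interrupted
 * context had interrupts enabled (GM clear).
 */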
fault_resume_kernel:
#ifdef CONFIG_PREEMPT
        get_thread_info r0
        ld.w    r2, r0[TI_preempt_count]
        cp.w    r2, 0
        brne    1f
        ld.w    r1, r0[TI_flags]
        bld     r1, TIF_NEED_RESCHED
        brcc    1f
        lddsp   r4, sp[REG_SR]
        bld     r4, SYSREG_GM_OFFSET
        brcs    1f
        rcall   preempt_schedule_irq
1:
#endif

        popm    r8-r9
        mask_exceptions
        mfsr    r1, SYSREG_SR
        mtsr    SYSREG_RAR_EX, r8
        mtsr    SYSREG_RSR_EX, r9
        popm    lr
        sub     sp, -4          /* ignore SP */
        popm    r0-r12
        sub     sp, -4          /* ignore r12_orig */
        rete

irq_exit_work:
        /* Switch to exception mode so that we can share the same code. */
        mfsr    r8, SYSREG_SR
        cbr     r8, SYSREG_M0_OFFSET
        orh     r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
        mtsr    SYSREG_SR, r8
        sub     pc, -2
        get_thread_info r0
        ld.w    r1, r0[TI_flags]

fault_exit_work:
        bld     r1, TIF_NEED_RESCHED
        brcc    1f
        unmask_interrupts
        rcall   schedule
        mask_interrupts
        ld.w    r1, r0[TI_flags]
        rjmp    fault_exit_work

1:      mov     r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
        tst     r1, r2
        breq    2f
        unmask_interrupts
        mov     r12, sp
        mov     r11, r0
        rcall   do_notify_resume
        mask_interrupts
        ld.w    r1, r0[TI_flags]
        rjmp    fault_exit_work

2:      bld     r1, TIF_BREAKPOINT
        brcc    fault_resume_user
        rjmp    enter_monitor_mode

        .section .kprobes.text, "ax", @progbits
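
/*
 * Debug exception entry (Monitor Mode). stmts saves the user context
 * registers, so if the trapped context was not user mode its banked
 * LR and SP must be patched into the frame by debug_fixup_regs.
 */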
        .type   handle_debug, @function
handle_debug:
        sub     sp, 4           /* r12_orig */
        stmts   --sp, r0-lr
        mfsr    r8, SYSREG_RAR_DBG
        mfsr    r9, SYSREG_RSR_DBG
        unmask_exceptions
        pushm   r8-r9
        bfextu  r9, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
        brne    debug_fixup_regs

.Ldebug_fixup_cont:
#ifdef CONFIG_TRACE_IRQFLAGS
        rcall   trace_hardirqs_off
#endif
        mov     r12, sp
        rcall   do_debug
        mov     sp, r12

        lddsp   r2, sp[REG_SR]
        bfextu  r3, r2, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
        brne    debug_resume_kernel

        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        mov     r2, _TIF_DBGWORK_MASK
        tst     r1, r2
        brne    debug_exit_work

        bld     r1, TIF_SINGLE_STEP
        brcc    1f
        mfdr    r4, OCD_DC
        sbr     r4, OCD_DC_SS_BIT
        mtdr    OCD_DC, r4

1:      popm    r10,r11
        mask_exceptions
        mtsr    SYSREG_RSR_DBG, r11
        mtsr    SYSREG_RAR_DBG, r10
#ifdef CONFIG_TRACE_IRQFLAGS
        bld     r11, SYSREG_GM_OFFSET
        brcc    1f
        rcall   trace_hardirqs_on
1:
#endif
        ldmts   sp++, r0-lr
        sub     sp, -4
        retd
        .size   handle_debug, . - handle_debug

/* Mode of the trapped context is in r9 */
        .type   debug_fixup_regs, @function
debug_fixup_regs:
        mfsr    r8, SYSREG_SR
        mov     r10, r8
        bfins   r8, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
        mtsr    SYSREG_SR, r8
        sub     pc, -2
        stdsp   sp[REG_LR], lr
        mtsr    SYSREG_SR, r10
        sub     pc, -2
        sub     r8, sp, -FRAME_SIZE_FULL
        stdsp   sp[REG_SP], r8
        rjmp    .Ldebug_fixup_cont
        .size   debug_fixup_regs, . - debug_fixup_regs

        .type   debug_resume_kernel, @function
debug_resume_kernel:
        mask_exceptions
        popm    r10, r11
        mtsr    SYSREG_RAR_DBG, r10
        mtsr    SYSREG_RSR_DBG, r11
#ifdef CONFIG_TRACE_IRQFLAGS
        bld     r11, SYSREG_GM_OFFSET
        brcc    1f
        rcall   trace_hardirqs_on
1:
#endif
        mfsr    r2, SYSREG_SR
        mov     r1, r2
        bfins   r2, r3, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
        mtsr    SYSREG_SR, r2
        sub     pc, -2
        popm    lr
        mtsr    SYSREG_SR, r1
        sub     pc, -2
        sub     sp, -4          /* skip SP */
        popm    r0-r12
        sub     sp, -4
        retd
        .size   debug_resume_kernel, . - debug_resume_kernel

        .type   debug_exit_work, @function
debug_exit_work:
        /*
         * We must return from Monitor Mode using a retd, and we must
         * not schedule since that involves the D bit in SR getting
         * cleared by something other than the debug hardware. This
         * may cause undefined behaviour according to the Architecture
         * manual.
         *
         * So we fix up the return address and status and return to a
         * stub below in Exception mode. From there, we can follow the
         * normal exception return path.
         *
         * The real return address and status registers are stored on
         * the stack in the way the exception return path understands,
         * so no need to fix anything up there.
         */
        sub     r8, pc, . - fault_exit_work
        mtsr    SYSREG_RAR_DBG, r8
        mov     r9, 0
        orh     r9, hi(SR_EM | SR_GM | MODE_EXCEPTION)
        mtsr    SYSREG_RSR_DBG, r9
        sub     pc, -2
        retd
        .size   debug_exit_work, . - debug_exit_work

        .set    rsr_int0,       SYSREG_RSR_INT0
        .set    rsr_int1,       SYSREG_RSR_INT1
        .set    rsr_int2,       SYSREG_RSR_INT2
        .set    rsr_int3,       SYSREG_RSR_INT3
        .set    rar_int0,       SYSREG_RAR_INT0
        .set    rar_int1,       SYSREG_RAR_INT1
        .set    rar_int2,       SYSREG_RAR_INT2
        .set    rar_int3,       SYSREG_RAR_INT3
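
/*
 * Interrupt entry, one instance per interrupt priority level. Build a
 * pt_regs frame, call do_IRQ() with the level in r12, then handle any
 * pending work, the cpu_idle sleep race and, with CONFIG_PREEMPT,
 * kernel preemption on the way out.
 */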
        .macro  IRQ_LEVEL level
        .type   irq_level\level, @function
irq_level\level:
        sub     sp, 4           /* r12_orig */
        stmts   --sp,r0-lr
        mfsr    r8, rar_int\level
        mfsr    r9, rsr_int\level

#ifdef CONFIG_PREEMPT
        sub     r11, pc, (. - system_call)
        cp.w    r11, r8
        breq    4f
#endif

        pushm   r8-r9

        mov     r11, sp
        mov     r12, \level
        rcall   do_IRQ

        lddsp   r4, sp[REG_SR]
        bfextu  r4, r4, SYSREG_M0_OFFSET, 3
        cp.w    r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
        breq    2f
        cp.w    r4, MODE_USER >> SYSREG_M0_OFFSET
#ifdef CONFIG_PREEMPT
        brne    3f
#else
        brne    1f
#endif

        get_thread_info r0
        ld.w    r1, r0[TI_flags]
        andl    r1, _TIF_WORK_MASK, COH
        brne    irq_exit_work

1:
#ifdef CONFIG_TRACE_IRQFLAGS
        rcall   trace_hardirqs_on
#endif
        popm    r8-r9
        mtsr    rar_int\level, r8
        mtsr    rsr_int\level, r9
        ldmts   sp++,r0-lr
        sub     sp, -4          /* ignore r12_orig */
        rete

#ifdef CONFIG_PREEMPT
4:      mask_interrupts
        mfsr    r8, rsr_int\level
        sbr     r8, 16
        mtsr    rsr_int\level, r8
        ldmts   sp++, r0-lr
        sub     sp, -4          /* ignore r12_orig */
        rete
#endif

2:      get_thread_info r0
        ld.w    r1, r0[TI_flags]
        bld     r1, TIF_CPU_GOING_TO_SLEEP
#ifdef CONFIG_PREEMPT
        brcc    3f
#else
        brcc    1b
#endif
        sub     r1, pc, . - cpu_idle_skip_sleep
        stdsp   sp[REG_PC], r1

#ifdef CONFIG_PREEMPT
3:      get_thread_info r0
        ld.w    r2, r0[TI_preempt_count]
        cp.w    r2, 0
        brne    1b
        ld.w    r1, r0[TI_flags]
        bld     r1, TIF_NEED_RESCHED
        brcc    1b
        lddsp   r4, sp[REG_SR]
        bld     r4, SYSREG_GM_OFFSET
        brcs    1b
        rcall   preempt_schedule_irq
#endif
        rjmp    1b
        .endm

        .section .irq.text,"ax",@progbits

        .global irq_level0
        .global irq_level1
        .global irq_level2
        .global irq_level3
        IRQ_LEVEL 0
        IRQ_LEVEL 1
        IRQ_LEVEL 2
        IRQ_LEVEL 3

        .section .kprobes.text, "ax", @progbits
        .type   enter_monitor_mode, @function
enter_monitor_mode:
        /*
         * We need to enter monitor mode to do a single step. The
         * monitor code will alter the return address so that we
         * return directly to the user instead of returning here.
         */
        breakpoint
        rjmp    breakpoint_failed
        .size   enter_monitor_mode, . - enter_monitor_mode

        .type   debug_trampoline, @function
        .global debug_trampoline
debug_trampoline:
        /*
         * Save the registers on the stack so that the monitor code
         * can find them easily.
         */
        sub     sp, 4           /* r12_orig */
        stmts   --sp, r0-lr
        get_thread_info r0
        ld.w    r8, r0[TI_rar_saved]
        ld.w    r9, r0[TI_rsr_saved]
        pushm   r8-r9

        /*
         * The monitor code will alter the return address so we don't
         * return here.
         */
        breakpoint
        rjmp    breakpoint_failed
        .size   debug_trampoline, . - debug_trampoline

        .type   breakpoint_failed, @function
breakpoint_failed:
        /*
         * Something went wrong. Perhaps the debug hardware isn't
         * enabled?
         */
        lda.w   r12, msg_breakpoint_failed
        mov     r11, sp
        mov     r10, 9          /* SIGKILL */
        call    die
1:      rjmp    1b

msg_breakpoint_failed:
        .asciz  "Failed to enter Debug Mode"