/* entry-avr32b.S */
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * This file contains the low-level entry-points into the kernel, that is,
 * exception handlers, debug trap handlers, interrupt handlers and the
 * system call handler.
 */
#include <linux/errno.h>

#include <asm/asm.h>
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/ocd.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

#ifdef CONFIG_PREEMPT
	/* With preemption, interrupts must stay masked while TI_flags is sampled. */
# define preempt_stop		mask_interrupts
#else
# define preempt_stop
	/* Without preemption, returning to kernel mode needs no resched check. */
# define fault_resume_kernel	fault_restore_all
#endif

#define __MASK(x)	((1 << (x)) - 1)
#define IRQ_MASK	((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
			 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
	/*
	 * Exception vector table: one branch per entry, each entry padded
	 * to the same power-of-two size by .align 2 so the hardware can
	 * index it by exception cause.  Entry order follows the AVR32
	 * exception priority list — verify against the Architecture Manual.
	 */
	.section .ex.text,"ax",@progbits
	.align	2
exception_vectors:
	bral	handle_critical
	.align	2
	bral	handle_critical
	.align	2
	bral	do_bus_error_write
	.align	2
	bral	do_bus_error_read
	.align	2
	bral	do_nmi_ll
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	handle_debug
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	do_fpe_ll
	.align	2
	bral	do_illegal_opcode_ll
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_address_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	handle_protection_fault
	.align	2
	bral	do_dtlb_modified
/*
 * Fast-path TLB miss handlers.  Only r0-r3 are saved/restored; anything
 * that cannot be resolved here (page not present) falls through to the
 * full-context slow path (page_table_not_present / page_not_present).
 *
 * r0 : PGD/PT/PTE
 * r1 : Offending address
 * r2 : Scratch register
 * r3 : Cause (5, 12 or 13)
 */
#define tlbmiss_save	pushm	r0-r3
#define tlbmiss_restore	popm	r0-r3

	/* Instruction TLB miss: shares the lookup code below. */
	.section .tlbx.ex.text,"ax",@progbits
	.global	itlb_miss
itlb_miss:
	tlbmiss_save
	rjmp	tlb_miss_common

	/* Data TLB miss, read access. */
	.section .tlbr.ex.text,"ax",@progbits
dtlb_miss_read:
	tlbmiss_save
	rjmp	tlb_miss_common

	/* Data TLB miss, write access: falls through into the common code. */
	.section .tlbw.ex.text,"ax",@progbits
dtlb_miss_write:
	tlbmiss_save

	.global	tlb_miss_common
tlb_miss_common:
	mfsr	r0, SYSREG_TLBEAR	/* faulting virtual address */
	mfsr	r1, SYSREG_PTBR		/* current page table base */

	/* Is it the vmalloc space? (address bit 31 set) */
	bld	r0, 31
	brcs	handle_vmalloc_miss

	/* First level lookup */
pgtbl_lookup:
	lsr	r2, r0, PGDIR_SHIFT
	ld.w	r3, r1[r2 << 2]		/* r3 = PGD entry */
	bfextu	r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
	bld	r3, _PAGE_BIT_PRESENT
	brcc	page_table_not_present

	/* Translate to virtual address in P1. */
	andl	r3, 0xf000
	sbr	r3, 31

	/* Second level lookup */
	ld.w	r2, r3[r1 << 2]		/* r2 = PTE */
	mfsr	r0, SYSREG_TLBARLO
	bld	r2, _PAGE_BIT_PRESENT
	brcc	page_not_present

	/* Mark the page as accessed */
	sbr	r2, _PAGE_BIT_ACCESSED
	st.w	r3[r1 << 2], r2

	/* Drop software flags */
	andl	r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
	mtsr	SYSREG_TLBELO, r2

	/* Figure out which entry we want to replace */
	mfsr	r1, SYSREG_MMUCR
	clz	r2, r0
	brcc	1f
	mov	r3, -1			/* All entries have been accessed, */
	mov	r2, 0			/* so start at 0 */
	mtsr	SYSREG_TLBARLO, r3	/* and reset TLBAR */

1:	bfins	r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
	mtsr	SYSREG_MMUCR, r1
	tlbw

	tlbmiss_restore
	rete

handle_vmalloc_miss:
	/* Simply do the lookup in init's page table */
	mov	r1, lo(swapper_pg_dir)
	orh	r1, hi(swapper_pg_dir)
	rjmp	pgtbl_lookup
/* --- System Call --- */

	/*
	 * System call entry.  Builds a full pt_regs frame on the kernel
	 * stack, dispatches through sys_call_table indexed by r8 (the
	 * syscall number), then returns via rets.  The return value is
	 * stored back into the frame at REG_R12 so the restore below
	 * reloads it.
	 */
	.section .scall.text,"ax",@progbits
system_call:
#ifdef CONFIG_PREEMPT
	mask_interrupts
#endif
	pushm	r12	/* r12_orig */
	stmts	--sp, r0-lr

	mfsr	r0, SYSREG_RAR_SUP	/* return address */
	mfsr	r1, SYSREG_RSR_SUP	/* saved status register */
#ifdef CONFIG_PREEMPT
	unmask_interrupts
#endif
	zero_fp
	stm	--sp, r0-r1		/* complete the frame: PC and SR */

	/* check for syscall tracing */
	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_SYSCALL_TRACE
	brcs	syscall_trace_enter

syscall_trace_cont:
	cp.w	r8, NR_syscalls		/* unsigned bounds check on r8 */
	brhs	syscall_badsys

	lddpc	lr, syscall_table_addr
	ld.w	lr, lr[r8 << 2]
	mov	r8, r5			/* 5th argument (6th is pushed by stub) */
	icall	lr

	.global	syscall_return
syscall_return:
	get_thread_info r0
	mask_interrupts		/* make sure we don't miss an interrupt
				   setting need_resched or sigpending
				   between sampling and the rets */

	/* Store the return value so that the correct value is loaded below */
	stdsp	sp[REG_R12], r12

	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work

syscall_exit_cont:
	popm	r8-r9
	mtsr	SYSREG_RAR_SUP, r8
	mtsr	SYSREG_RSR_SUP, r9
	ldmts	sp++, r0-lr
	sub	sp, -4		/* r12_orig */
	rets

	.align	2
syscall_table_addr:
	.long	sys_call_table

syscall_badsys:
	mov	r12, -ENOSYS	/* out-of-range syscall number */
	rjmp	syscall_return
	/*
	 * First return of a newly forked task: finish the context switch
	 * with schedule_tail(), then join the normal syscall exit path.
	 */
	.global	ret_from_fork
ret_from_fork:
	rcall	schedule_tail

	/* check for syscall tracing */
	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_ALLWORK_MASK, COH
	brne	syscall_exit_work
	rjmp	syscall_exit_cont
syscall_trace_enter:
	/* Preserve syscall number/arguments (r8-r12) across the trace hook. */
	pushm	r8-r12
	rcall	syscall_trace
	popm	r8-r12
	rjmp	syscall_trace_cont
	/*
	 * Slow syscall-exit path, entered with interrupts masked and
	 * r1 = TI_flags.  Handles tracing, rescheduling, pending signals
	 * and debug breakpoints, re-sampling TI_flags after each action.
	 */
syscall_exit_work:
	bld	r1, TIF_SYSCALL_TRACE
	brcc	1f
	unmask_interrupts
	rcall	syscall_trace
	mask_interrupts
	ld.w	r1, r0[TI_flags]

1:	bld	r1, TIF_NEED_RESCHED
	brcc	2f
	unmask_interrupts
	rcall	schedule
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

2:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
	tst	r1, r2
	breq	3f
	unmask_interrupts
	mov	r12, sp		/* regs */
	mov	r11, r0		/* thread_info */
	rcall	do_notify_resume
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	1b

3:	bld	r1, TIF_BREAKPOINT
	brcc	syscall_exit_cont
	rjmp	enter_monitor_mode
/* The slow path of the TLB miss handler */
page_table_not_present:
page_not_present:
	/* Rebuild a full pt_regs frame and hand off to the C fault handler. */
	tlbmiss_restore
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR	/* exception cause register */
	mov	r11, sp		/* regs */
	rcall	do_page_fault
	rjmp	ret_from_exception
/* This function expects to find offending PC in SYSREG_RAR_EX */
	/*
	 * Complete the exception frame: push the trapped PC and SR, fixing
	 * up the saved SP for kernel-mode traps and handling the special
	 * case where RAR_EX points at debug_trampoline (see comment at 3:).
	 * Returns with exceptions unmasked.
	 */
	.type	save_full_context_ex, @function
	.align	2
save_full_context_ex:
	mfsr	r11, SYSREG_RAR_EX
	sub	r9, pc, . - debug_trampoline	/* r9 = &debug_trampoline */
	mfsr	r8, SYSREG_RSR_EX
	cp.w	r9, r11
	breq	3f
	mov	r12, r8
	andh	r8, (MODE_MASK >> 16), COH	/* trapped in kernel mode? */
	brne	2f

1:	pushm	r11, r12	/* PC and SR */
	unmask_exceptions
	ret	r12

2:	sub	r10, sp, -(FRAME_SIZE_FULL - REG_LR)
	stdsp	sp[4], r10	/* replace saved SP */
	rjmp	1b

	/*
	 * The debug handler set up a trampoline to make us
	 * automatically enter monitor mode upon return, but since
	 * we're saving the full context, we must assume that the
	 * exception handler might want to alter the return address
	 * and/or status register. So we need to restore the original
	 * context and enter monitor mode manually after the exception
	 * has been handled.
	 */
3:	get_thread_info r8
	ld.w	r11, r8[TI_rar_saved]
	ld.w	r12, r8[TI_rsr_saved]
	rjmp	1b
	.size	save_full_context_ex, . - save_full_context_ex
/* Low-level exception handlers */

	/*
	 * Unrecoverable exception: save the full context and call the C
	 * handler, which is not expected to return.
	 */
handle_critical:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_critical_exception

	/* We should never get here... */
bad_return:
	sub	r12, pc, (. - 1f)	/* r12 = address of the message below */
	bral	panic
	.align	2
1:	.asciz	"Return from critical exception!"
	.align	1
	/*
	 * Bus error handlers: save the full context and call do_bus_error()
	 * with r12 = faulting bus address (BEAR), r11 = 1 for a write access
	 * or 0 for a read, r10 = regs.
	 */
do_bus_error_write:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mov	r11, 1		/* write access */
	rjmp	1f

do_bus_error_read:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mov	r11, 0		/* read access */

1:	mfsr	r12, SYSREG_BEAR
	mov	r10, sp
	rcall	do_bus_error
	rjmp	ret_from_exception
	.align	1
	/*
	 * Non-maskable interrupt.  Saves a full frame, fixes up the saved
	 * SP if the NMI hit kernel mode, and uses a separate restore path
	 * (3:) in that case, keyed off r0 (the trapped mode bits).
	 */
do_nmi_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	mfsr	r9, SYSREG_RSR_NMI
	mfsr	r8, SYSREG_RAR_NMI
	bfextu	r0, r9, MODE_SHIFT, 3	/* r0 = mode of interrupted context */
	brne	2f			/* non-zero => not user mode */

1:	pushm	r8, r9	/* PC and SR */
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_nmi
	popm	r8-r9
	mtsr	SYSREG_RAR_NMI, r8
	tst	r0, r0
	mtsr	SYSREG_RSR_NMI, r9
	brne	3f			/* kernel mode: partial restore */

	ldmts	sp++, r0-lr
	sub	sp, -4		/* skip r12_orig */
	rete

2:	sub	r10, sp, -(FRAME_SIZE_FULL - REG_LR)
	stdsp	sp[4], r10	/* replace saved SP */
	rjmp	1b

3:	popm	lr
	sub	sp, -4		/* skip sp */
	popm	r0-r12
	sub	sp, -4		/* skip r12_orig */
	rete
	/* Address exception: full context save, then the C handler. */
handle_address_fault:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_address_exception
	rjmp	ret_from_exception
	/* Protection fault: treated as a page fault with full context. */
handle_protection_fault:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_page_fault
	rjmp	ret_from_exception
	.align	1
	/* Illegal/privileged/unimplemented opcode: full context, C handler. */
do_illegal_opcode_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	mfsr	r12, SYSREG_ECR
	mov	r11, sp
	rcall	do_illegal_opcode
	rjmp	ret_from_exception
	/*
	 * DTLB modified (first write to a clean page): set the DIRTY bit
	 * in the PTE and rewrite the TLB entry.  Fast path only — uses
	 * r0-r3 and returns with rete.
	 */
do_dtlb_modified:
	pushm	r0-r3
	mfsr	r1, SYSREG_TLBEAR	/* faulting address */
	mfsr	r0, SYSREG_PTBR
	lsr	r2, r1, PGDIR_SHIFT
	ld.w	r0, r0[r2 << 2]		/* r0 = PGD entry */
	lsl	r1, (32 - PGDIR_SHIFT)
	lsr	r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT	/* r1 = PTE index */

	/* Translate to virtual address in P1 */
	andl	r0, 0xf000
	sbr	r0, 31

	add	r2, r0, r1 << 2		/* r2 = &PTE */
	ld.w	r3, r2[0]
	sbr	r3, _PAGE_BIT_DIRTY
	mov	r0, r3
	st.w	r2[0], r3

	/* The page table is up-to-date. Update the TLB entry as well */
	andl	r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
	mtsr	SYSREG_TLBELO, r0

	/* MMUCR[DRP] is updated automatically, so let's go... */
	tlbw

	popm	r0-r3
	rete
	/* Floating-point exception: full context save, then do_fpe(). */
do_fpe_ll:
	sub	sp, 4
	stmts	--sp, r0-lr
	rcall	save_full_context_ex
	unmask_interrupts
	mov	r12, 26		/* NOTE(review): magic number — presumably the
				   ECR value for FP exceptions; other handlers
				   read SYSREG_ECR here.  Verify and name it. */
	mov	r11, sp
	rcall	do_fpe
	rjmp	ret_from_exception
	/*
	 * Common exception return path.  Interrupts are masked while
	 * TI_flags is sampled so no wakeup can slip in before rete.
	 */
ret_from_exception:
	mask_interrupts
	lddsp	r4, sp[REG_SR]

	andh	r4, (MODE_MASK >> 16), COH	/* returning to kernel mode? */
	brne	fault_resume_kernel

	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH
	brne	fault_exit_work

fault_resume_user:
	popm	r8-r9			/* PC and SR from the frame */
	mask_exceptions
	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9
	ldmts	sp++, r0-lr
	sub	sp, -4			/* skip r12_orig */
	rete
	/*
	 * Return to kernel mode after an exception; with CONFIG_PREEMPT,
	 * reschedule first if preemption is allowed (preempt_count == 0,
	 * NEED_RESCHED set, interrupts not globally masked in saved SR).
	 */
fault_resume_kernel:
#ifdef CONFIG_PREEMPT
	get_thread_info r0
	ld.w	r2, r0[TI_preempt_count]
	cp.w	r2, 0
	brne	1f
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED
	brcc	1f
	lddsp	r4, sp[REG_SR]
	bld	r4, SYSREG_GM_OFFSET
	brcs	1f
	rcall	preempt_schedule_irq
1:
#endif

	popm	r8-r9
	mask_exceptions
	mfsr	r1, SYSREG_SR	/* NOTE(review): r1 appears unused below —
				   verify whether this read is needed */
	mtsr	SYSREG_RAR_EX, r8
	mtsr	SYSREG_RSR_EX, r9
	popm	lr
	sub	sp, -4		/* ignore SP */
	popm	r0-r12
	sub	sp, -4		/* ignore r12_orig */
	rete
	/*
	 * Work pending on return from an interrupt.  We first switch the
	 * CPU from INT mode to Exception mode so the fault_exit_work /
	 * fault_resume_user code below can be shared verbatim.
	 */
irq_exit_work:
	/* Switch to exception mode so that we can share the same code. */
	mfsr	r8, SYSREG_SR
	cbr	r8, SYSREG_M0_OFFSET
	orh	r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
	mtsr	SYSREG_SR, r8
	sub	pc, -2		/* flush pipeline after the mode change */

	get_thread_info r0
	ld.w	r1, r0[TI_flags]

	/* Loop handling resched, signals and breakpoints until no work left. */
fault_exit_work:
	bld	r1, TIF_NEED_RESCHED
	brcc	1f
	unmask_interrupts
	rcall	schedule
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	fault_exit_work

1:	mov	r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
	tst	r1, r2
	breq	2f
	unmask_interrupts
	mov	r12, sp		/* regs */
	mov	r11, r0		/* thread_info */
	rcall	do_notify_resume
	mask_interrupts
	ld.w	r1, r0[TI_flags]
	rjmp	fault_exit_work

2:	bld	r1, TIF_BREAKPOINT
	brcc	fault_resume_user
	rjmp	enter_monitor_mode
	.section .kprobes.text, "ax", @progbits
	/*
	 * Debug exception entry.  Saves a full frame, fixes up LR/SP when
	 * the trapped context was not in application mode (debug_fixup_regs),
	 * calls do_debug(), then either resumes, re-arms single-step, or
	 * defers pending work to debug_exit_work.
	 */
	.type	handle_debug, @function
handle_debug:
	sub	sp, 4		/* r12_orig */
	stmts	--sp, r0-lr
	mfsr	r8, SYSREG_RAR_DBG
	mfsr	r9, SYSREG_RSR_DBG
	unmask_exceptions
	pushm	r8-r9
	bfextu	r9, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	brne	debug_fixup_regs

.Ldebug_fixup_cont:
#ifdef CONFIG_TRACE_IRQFLAGS
	rcall	trace_hardirqs_off
#endif
	mov	r12, sp		/* regs */
	rcall	do_debug
	mov	sp, r12		/* do_debug may return a new frame pointer */

	lddsp	r2, sp[REG_SR]
	bfextu	r3, r2, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	brne	debug_resume_kernel

	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	mov	r2, _TIF_DBGWORK_MASK
	tst	r1, r2
	brne	debug_exit_work

	bld	r1, TIF_SINGLE_STEP
	brcc	1f
	mfdr	r4, OCD_DC	/* re-arm hardware single-step */
	sbr	r4, OCD_DC_SS_BIT
	mtdr	OCD_DC, r4

1:	popm	r10,r11
	mask_exceptions
	mtsr	SYSREG_RSR_DBG, r11
	mtsr	SYSREG_RAR_DBG, r10
#ifdef CONFIG_TRACE_IRQFLAGS
	rcall	trace_hardirqs_on
1:	/* NOTE(review): this local label looks vestigial — nothing in this
	   #ifdef region branches to it; verify before removing */
#endif
	ldmts	sp++, r0-lr
	sub	sp, -4
	retd
	.size	handle_debug, . - handle_debug
/* Mode of the trapped context is in r9 */
	/*
	 * The debug exception trapped a non-application context, so the
	 * banked LR/SP we saved are wrong.  Temporarily switch SR to the
	 * trapped mode to read its LR, then patch LR and SP in the frame.
	 */
	.type	debug_fixup_regs, @function
debug_fixup_regs:
	mfsr	r8, SYSREG_SR
	mov	r10, r8			/* remember our own SR */
	bfins	r8, r9, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	mtsr	SYSREG_SR, r8
	sub	pc, -2			/* flush pipeline after mode change */
	stdsp	sp[REG_LR], lr		/* LR of the trapped mode */
	mtsr	SYSREG_SR, r10		/* back to debug mode */
	sub	pc, -2
	sub	r8, sp, -FRAME_SIZE_FULL
	stdsp	sp[REG_SP], r8		/* SP before the frame was pushed */
	rjmp	.Ldebug_fixup_cont
	.size	debug_fixup_regs, . - debug_fixup_regs
	/*
	 * Return from the debug handler to a non-user context (mode bits
	 * in r3).  LR must be written back in the target mode's register
	 * bank, so SR is temporarily switched, mirroring debug_fixup_regs.
	 */
	.type	debug_resume_kernel, @function
debug_resume_kernel:
	mask_exceptions
	popm	r10, r11
	mtsr	SYSREG_RAR_DBG, r10
	mtsr	SYSREG_RSR_DBG, r11
#ifdef CONFIG_TRACE_IRQFLAGS
	bld	r11, SYSREG_GM_OFFSET
	brcc	1f
	rcall	trace_hardirqs_on
1:
#endif
	mfsr	r2, SYSREG_SR
	mov	r1, r2			/* remember our own SR */
	bfins	r2, r3, SYSREG_MODE_OFFSET, SYSREG_MODE_SIZE
	mtsr	SYSREG_SR, r2
	sub	pc, -2			/* flush pipeline after mode change */
	popm	lr			/* restore LR into the target bank */
	mtsr	SYSREG_SR, r1
	sub	pc, -2
	sub	sp, -4			/* skip SP */
	popm	r0-r12
	sub	sp, -4
	retd
	.size	debug_resume_kernel, . - debug_resume_kernel
	.type	debug_exit_work, @function
debug_exit_work:
	/*
	 * We must return from Monitor Mode using a retd, and we must
	 * not schedule since that involves the D bit in SR getting
	 * cleared by something other than the debug hardware. This
	 * may cause undefined behaviour according to the Architecture
	 * manual.
	 *
	 * So we fix up the return address and status and return to a
	 * stub below in Exception mode. From there, we can follow the
	 * normal exception return path.
	 *
	 * The real return address and status registers are stored on
	 * the stack in the way the exception return path understands,
	 * so no need to fix anything up there.
	 */
	sub	r8, pc, . - fault_exit_work	/* r8 = &fault_exit_work */
	mtsr	SYSREG_RAR_DBG, r8
	mov	r9, 0
	orh	r9, hi(SR_EM | SR_GM | MODE_EXCEPTION)
	mtsr	SYSREG_RSR_DBG, r9
	sub	pc, -2
	retd
	.size	debug_exit_work, . - debug_exit_work
	/* Aliases so the macro below can form the register name textually. */
	.set	rsr_int0,	SYSREG_RSR_INT0
	.set	rsr_int1,	SYSREG_RSR_INT1
	.set	rsr_int2,	SYSREG_RSR_INT2
	.set	rsr_int3,	SYSREG_RSR_INT3
	.set	rar_int0,	SYSREG_RAR_INT0
	.set	rar_int1,	SYSREG_RAR_INT1
	.set	rar_int2,	SYSREG_RAR_INT2
	.set	rar_int3,	SYSREG_RAR_INT3

	/*
	 * Generates the handler for one interrupt priority level
	 * (irq_level0..irq_level3).  Saves a full frame, calls do_IRQ(),
	 * then dispatches on the interrupted mode: user (check work
	 * flags), supervisor (idle-sleep / preemption handling), or
	 * other (plain return).  The CONFIG_PREEMPT special case at 4:
	 * handles an interrupt that hit exactly at the system_call entry.
	 */
	.macro	IRQ_LEVEL level
	.type	irq_level\level, @function
irq_level\level:
	sub	sp, 4		/* r12_orig */
	stmts	--sp,r0-lr
	mfsr	r8, rar_int\level
	mfsr	r9, rsr_int\level
#ifdef CONFIG_PREEMPT
	sub	r11, pc, (. - system_call)
	cp.w	r11, r8
	breq	4f
#endif
	pushm	r8-r9

	mov	r11, sp		/* regs */
	mov	r12, \level	/* interrupt level */

	rcall	do_IRQ

	lddsp	r4, sp[REG_SR]
	bfextu	r4, r4, SYSREG_M0_OFFSET, 3
	cp.w	r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
	breq	2f
	cp.w	r4, MODE_USER >> SYSREG_M0_OFFSET
#ifdef CONFIG_PREEMPT
	brne	3f
#else
	brne	1f
#endif

	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	andl	r1, _TIF_WORK_MASK, COH
	brne	irq_exit_work

1:
#ifdef CONFIG_TRACE_IRQFLAGS
	rcall	trace_hardirqs_on
#endif
	popm	r8-r9
	mtsr	rar_int\level, r8
	mtsr	rsr_int\level, r9
	ldmts	sp++,r0-lr
	sub	sp, -4		/* ignore r12_orig */
	rete

#ifdef CONFIG_PREEMPT
4:	mask_interrupts
	mfsr	r8, rsr_int\level
	sbr	r8, 16		/* NOTE(review): bit 16 in the saved SR —
				   presumably GM, to retry system_call with
				   interrupts masked; verify offset */
	mtsr	rsr_int\level, r8
	ldmts	sp++, r0-lr
	sub	sp, -4		/* ignore r12_orig */
	rete
#endif

2:	get_thread_info r0
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_CPU_GOING_TO_SLEEP
#ifdef CONFIG_PREEMPT
	brcc	3f
#else
	brcc	1b
#endif
	/* Interrupted on the way to sleep: return to the skip-sleep stub. */
	sub	r1, pc, . - cpu_idle_skip_sleep
	stdsp	sp[REG_PC], r1
#ifdef CONFIG_PREEMPT
3:	get_thread_info r0
	ld.w	r2, r0[TI_preempt_count]
	cp.w	r2, 0
	brne	1b
	ld.w	r1, r0[TI_flags]
	bld	r1, TIF_NEED_RESCHED
	brcc	1b
	lddsp	r4, sp[REG_SR]
	bld	r4, SYSREG_GM_OFFSET
	brcs	1b
	rcall	preempt_schedule_irq
#endif
	rjmp	1b
	.endm
	.section .irq.text,"ax",@progbits

	/*
	 * Idle-sleep entry.  TIF_CPU_GOING_TO_SLEEP is set before sleeping;
	 * the IRQ_LEVEL handlers redirect a wakeup interrupt's return PC to
	 * cpu_idle_skip_sleep, which clears the flag again.
	 */
	.global	cpu_idle_sleep
cpu_idle_sleep:
	mask_interrupts
	get_thread_info r8
	ld.w	r9, r8[TI_flags]
	bld	r9, TIF_NEED_RESCHED
	brcs	cpu_idle_enable_int_and_exit
	sbr	r9, TIF_CPU_GOING_TO_SLEEP
	st.w	r8[TI_flags], r9
	unmask_interrupts
	sleep 0
cpu_idle_skip_sleep:
	mask_interrupts
	ld.w	r9, r8[TI_flags]
	cbr	r9, TIF_CPU_GOING_TO_SLEEP
	st.w	r8[TI_flags], r9
cpu_idle_enable_int_and_exit:
	unmask_interrupts
	retal	r12

	/* Instantiate the four interrupt-level handlers. */
	.global	irq_level0
	.global	irq_level1
	.global	irq_level2
	.global	irq_level3
	IRQ_LEVEL 0
	IRQ_LEVEL 1
	IRQ_LEVEL 2
	IRQ_LEVEL 3
	.section .kprobes.text, "ax", @progbits
	.type	enter_monitor_mode, @function
enter_monitor_mode:
	/*
	 * We need to enter monitor mode to do a single step. The
	 * monitor code will alter the return address so that we
	 * return directly to the user instead of returning here.
	 */
	breakpoint
	rjmp	breakpoint_failed
	.size	enter_monitor_mode, . - enter_monitor_mode
	.type	debug_trampoline, @function
	.global	debug_trampoline
debug_trampoline:
	/*
	 * Save the registers on the stack so that the monitor code
	 * can find them easily.  The real return PC/SR were stashed
	 * in thread_info by the debug handler (TI_rar_saved/TI_rsr_saved).
	 */
	sub	sp, 4		/* r12_orig */
	stmts	--sp, r0-lr
	get_thread_info r0
	ld.w	r8, r0[TI_rar_saved]
	ld.w	r9, r0[TI_rsr_saved]
	pushm	r8-r9

	/*
	 * The monitor code will alter the return address so we don't
	 * return here.
	 */
	breakpoint
	rjmp	breakpoint_failed
	.size	debug_trampoline, . - debug_trampoline
	.type	breakpoint_failed, @function
breakpoint_failed:
	/*
	 * Something went wrong. Perhaps the debug hardware isn't
	 * enabled?  Kill the task via die() and spin forever.
	 */
	lda.w	r12, msg_breakpoint_failed
	mov	r11, sp
	mov	r10, 9		/* SIGKILL */
	call	die

1:	rjmp	1b

msg_breakpoint_failed:
	.asciz	"Failed to enter Debug Mode"