entry-avr32b.S 14 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732
  1. /*
  2. * Copyright (C) 2004-2006 Atmel Corporation
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
  8. /*
  9. * This file contains the low-level entry-points into the kernel, that is,
  10. * exception handlers, debug trap handlers, interrupt handlers and the
  11. * system call handler.
  12. */
  13. #include <linux/errno.h>
  14. #include <asm/asm.h>
  15. #include <asm/hardirq.h>
  16. #include <asm/irq.h>
  17. #include <asm/ocd.h>
  18. #include <asm/page.h>
  19. #include <asm/pgtable.h>
  20. #include <asm/ptrace.h>
  21. #include <asm/sysreg.h>
  22. #include <asm/thread_info.h>
  23. #include <asm/unistd.h>
  24. #ifdef CONFIG_PREEMPT
  25. # define preempt_stop mask_interrupts
  26. #else
  27. # define preempt_stop
  28. # define fault_resume_kernel fault_restore_all
  29. #endif
  30. #define __MASK(x) ((1 << (x)) - 1)
  31. #define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
  32. (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
/*
 * Exception vector table: one 4-byte-aligned (.align 2 = 2^2) branch
 * per hardware exception cause.  Presumably placed at the EVBA base by
 * the linker script (.ex.text) so the CPU dispatches here directly —
 * TODO confirm ordering against the AVR32 Architecture Manual's
 * exception cause table.  Note several causes share a handler
 * (e.g. all ITLB/DTLB access violations -> handle_address_fault /
 * handle_protection_fault, all illegal-instruction variants ->
 * do_illegal_opcode_ll).
 */
  33. .section .ex.text,"ax",@progbits
  34. .align 2
  35. exception_vectors:
  36. bral handle_critical
  37. .align 2
  38. bral handle_critical
  39. .align 2
  40. bral do_bus_error_write
  41. .align 2
  42. bral do_bus_error_read
  43. .align 2
  44. bral do_nmi_ll
  45. .align 2
  46. bral handle_address_fault
  47. .align 2
  48. bral handle_protection_fault
  49. .align 2
  50. bral handle_debug
  51. .align 2
  52. bral do_illegal_opcode_ll
  53. .align 2
  54. bral do_illegal_opcode_ll
  55. .align 2
  56. bral do_illegal_opcode_ll
  57. .align 2
  58. bral do_fpe_ll
  59. .align 2
  60. bral do_illegal_opcode_ll
  61. .align 2
  62. bral handle_address_fault
  63. .align 2
  64. bral handle_address_fault
  65. .align 2
  66. bral handle_protection_fault
  67. .align 2
  68. bral handle_protection_fault
  69. .align 2
  70. bral do_dtlb_modified
/*
 * Fast TLB refill path.  Three entry points (instruction fetch miss,
 * data read miss, data write miss), each in its own section so the
 * linker script can place them at the addresses the MMU vectors to.
 * Only r0-r3 are saved/restored (tlbmiss_save/restore); on success the
 * handler writes a new TLB entry and returns with rete without ever
 * building a full pt_regs frame.  The slow path (page_table_not_present
 * / page_not_present) falls back to do_page_fault.
 */
  71. /*
  72. * r0 : PGD/PT/PTE
  73. * r1 : Offending address
  74. * r2 : Scratch register
  75. * r3 : Cause (5, 12 or 13)
  76. */
  77. #define tlbmiss_save pushm r0-r3
  78. #define tlbmiss_restore popm r0-r3
  79. .section .tlbx.ex.text,"ax",@progbits
  80. .global itlb_miss
  81. itlb_miss:
  82. tlbmiss_save
  83. rjmp tlb_miss_common
  84. .section .tlbr.ex.text,"ax",@progbits
  85. dtlb_miss_read:
  86. tlbmiss_save
  87. rjmp tlb_miss_common
  88. .section .tlbw.ex.text,"ax",@progbits
  89. dtlb_miss_write:
  90. tlbmiss_save
/* Falls straight through into tlb_miss_common (no rjmp needed). */
  91. .global tlb_miss_common
  92. tlb_miss_common:
/* r0 = page table base (PTBR), r1 = faulting virtual address (TLBEAR) */
  93. mfsr r0, SYSREG_PTBR
  94. mfsr r1, SYSREG_TLBEAR
  95. /* Is it the vmalloc space? */
/* Bit 31 set => kernel/vmalloc half of the address space */
  96. bld r1, 31
  97. brcs handle_vmalloc_miss
  98. /* First level lookup */
  99. pgtbl_lookup:
  100. lsr r2, r1, PGDIR_SHIFT
  101. ld.w r0, r0[r2 << 2]
  102. bld r0, _PAGE_BIT_PRESENT
  103. brcc page_table_not_present
  104. /* TODO: Check access rights on page table if necessary */
  105. /* Translate to virtual address in P1. */
/* Mask off low flag bits, then set bit 31 to map into the P1 segment */
  106. andl r0, 0xf000
  107. sbr r0, 31
  108. /* Second level lookup */
/* Isolate the PTE index bits of the fault address, scale by 4 */
  109. lsl r1, (32 - PGDIR_SHIFT)
  110. lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
  111. add r2, r0, r1 << 2
  112. ld.w r1, r2[0]
  113. bld r1, _PAGE_BIT_PRESENT
  114. brcc page_not_present
  115. /* Mark the page as accessed */
  116. sbr r1, _PAGE_BIT_ACCESSED
  117. st.w r2[0], r1
  118. /* Drop software flags */
  119. andl r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
  120. mtsr SYSREG_TLBELO, r1
  121. /* Figure out which entry we want to replace */
/*
 * clz on TLBARLO picks the first not-recently-accessed entry; carry set
 * means all bits were set (every entry accessed), so reset TLBAR and
 * restart replacement at entry 0.
 */
  122. mfsr r0, SYSREG_TLBARLO
  123. clz r2, r0
  124. brcc 1f
  125. mov r1, -1 /* All entries have been accessed, */
  126. mtsr SYSREG_TLBARLO, r1 /* so reset TLBAR */
  127. mov r2, 0 /* and start at 0 */
/* Insert the chosen entry index into MMUCR (bits 14+), keep low 14 bits */
  128. 1: mfsr r1, SYSREG_MMUCR
  129. lsl r2, 14
  130. andl r1, 0x3fff, COH
  131. or r1, r2
  132. mtsr SYSREG_MMUCR, r1
  133. tlbw
  134. tlbmiss_restore
  135. rete
  136. handle_vmalloc_miss:
  137. /* Simply do the lookup in init's page table */
  138. mov r0, lo(swapper_pg_dir)
  139. orh r0, hi(swapper_pg_dir)
  140. rjmp pgtbl_lookup
/*
 * System call entry/exit.
 * Entry: builds a full pt_regs frame (r12_orig slot, r0-lr via stmts,
 * then return PC/status from RAR_SUP/RSR_SUP), optionally diverts
 * through syscall_trace_enter, range-checks the syscall number in r8
 * and dispatches through sys_call_table.
 * Exit: with interrupts masked (so a flag set by an IRQ between the
 * test and rets cannot be lost), stores the return value into the
 * frame, handles TIF work (trace/resched/signals/breakpoint), then
 * restores RAR/RSR_SUP and the register frame and returns with rets.
 */
  141. /* --- System Call --- */
  142. .section .scall.text,"ax",@progbits
  143. system_call:
  144. pushm r12 /* r12_orig */
  145. stmts --sp, r0-lr
  146. zero_fp
  147. mfsr r0, SYSREG_RAR_SUP
  148. mfsr r1, SYSREG_RSR_SUP
  149. stm --sp, r0-r1
  150. /* check for syscall tracing */
  151. get_thread_info r0
  152. ld.w r1, r0[TI_flags]
  153. bld r1, TIF_SYSCALL_TRACE
  154. brcs syscall_trace_enter
  155. syscall_trace_cont:
/* r8 = syscall number; brhs is an unsigned >= check against NR_syscalls */
  156. cp.w r8, NR_syscalls
  157. brhs syscall_badsys
  158. lddpc lr, syscall_table_addr
  159. ld.w lr, lr[r8 << 2]
  160. mov r8, r5 /* 5th argument (6th is pushed by stub) */
  161. icall lr
  162. .global syscall_return
  163. syscall_return:
  164. get_thread_info r0
  165. mask_interrupts /* make sure we don't miss an interrupt
  166. setting need_resched or sigpending
  167. between sampling and the rets */
  168. /* Store the return value so that the correct value is loaded below */
  169. stdsp sp[REG_R12], r12
  170. ld.w r1, r0[TI_flags]
  171. andl r1, _TIF_ALLWORK_MASK, COH
  172. brne syscall_exit_work
  173. syscall_exit_cont:
/* Pop saved PC/SR back into the supervisor return registers */
  174. popm r8-r9
  175. mtsr SYSREG_RAR_SUP, r8
  176. mtsr SYSREG_RSR_SUP, r9
  177. ldmts sp++, r0-lr
  178. sub sp, -4 /* r12_orig */
  179. rets
  180. .align 2
  181. syscall_table_addr:
  182. .long sys_call_table
  183. syscall_badsys:
  184. mov r12, -ENOSYS
  185. rjmp syscall_return
  186. .global ret_from_fork
  187. ret_from_fork:
  188. rcall schedule_tail
  189. /* check for syscall tracing */
  190. get_thread_info r0
  191. ld.w r1, r0[TI_flags]
  192. andl r1, _TIF_ALLWORK_MASK, COH
  193. brne syscall_exit_work
  194. rjmp syscall_exit_cont
  195. syscall_trace_enter:
/* Preserve the syscall number and arguments around the tracer call */
  196. pushm r8-r12
  197. rcall syscall_trace
  198. popm r8-r12
  199. rjmp syscall_trace_cont
/*
 * Exit-work loop: r0 = thread_info, r1 = cached TI_flags.  Interrupts
 * are unmasked only around the calls out (syscall_trace, schedule,
 * do_notify_resume) and the flags are re-sampled after each.
 */
  200. syscall_exit_work:
  201. bld r1, TIF_SYSCALL_TRACE
  202. brcc 1f
  203. unmask_interrupts
  204. rcall syscall_trace
  205. mask_interrupts
  206. ld.w r1, r0[TI_flags]
  207. 1: bld r1, TIF_NEED_RESCHED
  208. brcc 2f
  209. unmask_interrupts
  210. rcall schedule
  211. mask_interrupts
  212. ld.w r1, r0[TI_flags]
  213. rjmp 1b
  214. 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
  215. tst r1, r2
  216. breq 3f
  217. unmask_interrupts
  218. mov r12, sp
  219. mov r11, r0
  220. rcall do_notify_resume
  221. mask_interrupts
  222. ld.w r1, r0[TI_flags]
  223. rjmp 1b
/*
 * TIF_BREAKPOINT: re-arm a hardware breakpoint at the return PC,
 * tagged with the low 8 bits of TLBEHI (presumably the current ASID)
 * via debug registers BWA2A/BWC2A — TODO confirm the BWC2A bit layout
 * against the AVR32 OCD specification.
 */
  224. 3: bld r1, TIF_BREAKPOINT
  225. brcc syscall_exit_cont
  226. mfsr r3, SYSREG_TLBEHI
  227. lddsp r2, sp[REG_PC]
  228. andl r3, 0xff, COH
  229. lsl r3, 1
  230. sbr r3, 30
  231. sbr r3, 0
  232. mtdr DBGREG_BWA2A, r2
  233. mtdr DBGREG_BWC2A, r3
  234. rjmp syscall_exit_cont
/*
 * TLB-miss slow path: undo the fast path's r0-r3 save, rebuild a full
 * pt_regs frame instead, and hand off to the C page fault handler with
 * the exception cause (ECR) in r12 and the frame pointer in r11.
 */
  235. /* The slow path of the TLB miss handler */
  236. page_table_not_present:
  237. page_not_present:
  238. tlbmiss_restore
  239. sub sp, 4
  240. stmts --sp, r0-lr
  241. rcall save_full_context_ex
  242. mfsr r12, SYSREG_ECR
  243. mov r11, sp
  244. rcall do_page_fault
  245. rjmp ret_from_exception
/*
 * Complete a pt_regs frame for an exception: push the faulting PC
 * (RAR_EX) and status (RSR_EX) on top of the already-saved r0-lr, then
 * re-enable exceptions.  If the saved mode bits are non-zero (the
 * exception did not come from user mode), first fix up the SP slot in
 * the frame to the pre-exception kernel SP (2:).  Returns to the
 * caller via ret r12; r8, r10-r12 are used as scratch.
 */
  246. /* This function expects to find offending PC in SYSREG_RAR_EX */
  247. save_full_context_ex:
  248. mfsr r8, SYSREG_RSR_EX
  249. mov r12, r8
  250. andh r8, (MODE_MASK >> 16), COH
  251. mfsr r11, SYSREG_RAR_EX
  252. brne 2f
  253. 1: pushm r11, r12 /* PC and SR */
  254. unmask_exceptions
  255. ret r12
  256. 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
  257. stdsp sp[4], r10 /* replace saved SP */
  258. rjmp 1b
  259. /* Low-level exception handlers */
/*
 * Critical/unrecoverable exception: save everything, call the C
 * handler with ECR in r12 and regs in r11.  do_critical_exception is
 * not expected to return; if it does, panic with the message below
 * (the sub computes the string's address into r12, panic's first arg).
 */
  260. handle_critical:
  261. pushm r12
  262. pushm r0-r12
  263. rcall save_full_context_ex
  264. mfsr r12, SYSREG_ECR
  265. mov r11, sp
  266. rcall do_critical_exception
  267. /* We should never get here... */
  268. bad_return:
  269. sub r12, pc, (. - 1f)
  270. bral panic
  271. .align 2
  272. 1: .asciz "Return from critical exception!"
/*
 * Bus error handlers: identical frames, r11 distinguishes write (1)
 * from read (0); r12 = faulting bus address from BEAR, r10 = regs.
 */
  273. .align 1
  274. do_bus_error_write:
  275. sub sp, 4
  276. stmts --sp, r0-lr
  277. rcall save_full_context_ex
  278. mov r11, 1
  279. rjmp 1f
  280. do_bus_error_read:
  281. sub sp, 4
  282. stmts --sp, r0-lr
  283. rcall save_full_context_ex
  284. mov r11, 0
  285. 1: mfsr r12, SYSREG_BEAR
  286. mov r10, sp
  287. rcall do_bus_error
  288. rjmp ret_from_exception
  289. .align 1
/*
 * NMI: builds its own frame from RAR_NMI/RSR_NMI (NMI has its own
 * return-register pair).  r0 = mode bits extracted from the saved SR;
 * non-zero means the NMI interrupted non-user context, so the saved SP
 * slot is fixed up first (2:) and the register restore at 3: skips
 * r0-lr restore via ldmts (lr/r0-r12 are popped individually instead).
 */
  290. do_nmi_ll:
  291. sub sp, 4
  292. stmts --sp, r0-lr
  293. mfsr r9, SYSREG_RSR_NMI
  294. mfsr r8, SYSREG_RAR_NMI
  295. bfextu r0, r9, MODE_SHIFT, 3
  296. brne 2f
  297. 1: pushm r8, r9 /* PC and SR */
  298. mfsr r12, SYSREG_ECR
  299. mov r11, sp
  300. rcall do_nmi
  301. popm r8-r9
  302. mtsr SYSREG_RAR_NMI, r8
/* r0 still holds the entry mode bits: 0 => came from user mode */
  303. tst r0, r0
  304. mtsr SYSREG_RSR_NMI, r9
  305. brne 3f
  306. ldmts sp++, r0-lr
  307. sub sp, -4 /* skip r12_orig */
  308. rete
  309. 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
  310. stdsp sp[4], r10 /* replace saved SP */
  311. rjmp 1b
  312. 3: popm lr
  313. sub sp, -4 /* skip sp */
  314. popm r0-r12
  315. sub sp, -4 /* skip r12_orig */
  316. rete
/*
 * Generic exception stubs: each builds a full frame, completes it via
 * save_full_context_ex, then calls the C handler with the exception
 * cause (ECR) in r12 and the pt_regs pointer in r11.
 */
  317. handle_address_fault:
  318. sub sp, 4
  319. stmts --sp, r0-lr
  320. rcall save_full_context_ex
  321. mfsr r12, SYSREG_ECR
  322. mov r11, sp
  323. rcall do_address_exception
  324. rjmp ret_from_exception
/* Protection faults are routed to do_page_fault like TLB slow misses */
  325. handle_protection_fault:
  326. sub sp, 4
  327. stmts --sp, r0-lr
  328. rcall save_full_context_ex
  329. mfsr r12, SYSREG_ECR
  330. mov r11, sp
  331. rcall do_page_fault
  332. rjmp ret_from_exception
  333. .align 1
  334. do_illegal_opcode_ll:
  335. sub sp, 4
  336. stmts --sp, r0-lr
  337. rcall save_full_context_ex
  338. mfsr r12, SYSREG_ECR
  339. mov r11, sp
  340. rcall do_illegal_opcode
  341. rjmp ret_from_exception
/*
 * DTLB-modified (write to a clean page): repeat the two-level page
 * table walk of tlb_miss_common, set the dirty bit in the PTE, and
 * rewrite the existing TLB entry with the updated hardware flags.
 * Like the fast miss path, only r0-r3 are touched and no pt_regs
 * frame is built.  Note: unlike the miss path there is no present-bit
 * check here — the entry was already in the TLB, so the PTE is
 * presumably known valid.
 */
  342. do_dtlb_modified:
  343. pushm r0-r3
  344. mfsr r1, SYSREG_TLBEAR
  345. mfsr r0, SYSREG_PTBR
  346. lsr r2, r1, PGDIR_SHIFT
  347. ld.w r0, r0[r2 << 2]
  348. lsl r1, (32 - PGDIR_SHIFT)
  349. lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
  350. /* Translate to virtual address in P1 */
  351. andl r0, 0xf000
  352. sbr r0, 31
  353. add r2, r0, r1 << 2
  354. ld.w r3, r2[0]
  355. sbr r3, _PAGE_BIT_DIRTY
  356. mov r0, r3
  357. st.w r2[0], r3
  358. /* The page table is up-to-date. Update the TLB entry as well */
  359. andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
  360. mtsr SYSREG_TLBELO, r0
  361. /* MMUCR[DRP] is updated automatically, so let's go... */
  362. tlbw
  363. popm r0-r3
  364. rete
/*
 * Floating-point exception: full frame, then do_fpe(26, regs) with
 * interrupts enabled.  26 is a hard-coded cause code — presumably the
 * FP exception ECR value; confirm against the architecture manual.
 */
  365. do_fpe_ll:
  366. sub sp, 4
  367. stmts --sp, r0-lr
  368. rcall save_full_context_ex
  369. unmask_interrupts
  370. mov r12, 26
  371. mov r11, sp
  372. rcall do_fpe
  373. rjmp ret_from_exception
/*
 * Common exception return.  Saved SR mode bits decide the path:
 * non-zero => returning to kernel (fault_resume_kernel, with optional
 * preemption under CONFIG_PREEMPT); zero => returning to user, where
 * pending TIF work is handled first (fault_exit_work).  Interrupts
 * stay masked across the flag test so nothing is lost before rete.
 */
  374. ret_from_exception:
  375. mask_interrupts
  376. lddsp r4, sp[REG_SR]
  377. andh r4, (MODE_MASK >> 16), COH
  378. brne fault_resume_kernel
  379. get_thread_info r0
  380. ld.w r1, r0[TI_flags]
  381. andl r1, _TIF_WORK_MASK, COH
  382. brne fault_exit_work
  383. fault_resume_user:
/* Restore saved PC/SR into the exception return registers, then frame */
  384. popm r8-r9
  385. mask_exceptions
  386. mtsr SYSREG_RAR_EX, r8
  387. mtsr SYSREG_RSR_EX, r9
  388. ldmts sp++, r0-lr
  389. sub sp, -4
  390. rete
  391. fault_resume_kernel:
  392. #ifdef CONFIG_PREEMPT
/*
 * Preempt only if preempt_count == 0, TIF_NEED_RESCHED is set and the
 * interrupted context did not have the global interrupt mask (GM) set.
 */
  393. get_thread_info r0
  394. ld.w r2, r0[TI_preempt_count]
  395. cp.w r2, 0
  396. brne 1f
  397. ld.w r1, r0[TI_flags]
  398. bld r1, TIF_NEED_RESCHED
  399. brcc 1f
  400. lddsp r4, sp[REG_SR]
  401. bld r4, SYSREG_GM_OFFSET
  402. brcs 1f
  403. rcall preempt_schedule_irq
  404. 1:
  405. #endif
  406. popm r8-r9
  407. mask_exceptions
  408. mfsr r1, SYSREG_SR
  409. mtsr SYSREG_RAR_EX, r8
  410. mtsr SYSREG_RSR_EX, r9
/* Kernel return: restore lr and r0-r12 individually, keep current SP */
  411. popm lr
  412. sub sp, -4 /* ignore SP */
  413. popm r0-r12
  414. sub sp, -4 /* ignore r12_orig */
  415. rete
/*
 * Entered from the IRQ handlers with user-mode work pending.  Switches
 * the CPU from interrupt mode to exception mode (M2:M1:M0 = 110) so
 * the shared fault_exit_work / fault_resume_user code (which returns
 * via RAR_EX/RSR_EX and rete) can be reused.  "sub pc, -2" after the
 * SR write is the pipeline flush idiom used throughout this file.
 */
  416. irq_exit_work:
  417. /* Switch to exception mode so that we can share the same code. */
  418. mfsr r8, SYSREG_SR
  419. cbr r8, SYSREG_M0_OFFSET
  420. orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
  421. mtsr SYSREG_SR, r8
  422. sub pc, -2
  423. get_thread_info r0
  424. ld.w r1, r0[TI_flags]
/*
 * Work loop: r0 = thread_info, r1 = cached flags; interrupts unmasked
 * only around schedule()/do_notify_resume(), flags re-read after each.
 */
  425. fault_exit_work:
  426. bld r1, TIF_NEED_RESCHED
  427. brcc 1f
  428. unmask_interrupts
  429. rcall schedule
  430. mask_interrupts
  431. ld.w r1, r0[TI_flags]
  432. rjmp fault_exit_work
  433. 1: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
  434. tst r1, r2
  435. breq 2f
  436. unmask_interrupts
  437. mov r12, sp
  438. mov r11, r0
  439. rcall do_notify_resume
  440. mask_interrupts
  441. ld.w r1, r0[TI_flags]
  442. rjmp fault_exit_work
/* TIF_BREAKPOINT: same hardware-breakpoint re-arm as the syscall path */
  443. 2: bld r1, TIF_BREAKPOINT
  444. brcc fault_resume_user
  445. mfsr r3, SYSREG_TLBEHI
  446. lddsp r2, sp[REG_PC]
  447. andl r3, 0xff, COH
  448. lsl r3, 1
  449. sbr r3, 30
  450. sbr r3, 0
  451. mtdr DBGREG_BWA2A, r2
  452. mtdr DBGREG_BWC2A, r3
  453. rjmp fault_resume_user
/*
 * Debug trap taken from a privileged (non-user) context, reached from
 * handle_debug below.  The saved frame's LR and SP slots belong to the
 * interrupted mode's banked registers, so this code temporarily
 * switches SR's mode field to that mode (r11) to read the right lr,
 * then switches back.  Each mtsr SYSREG_SR is followed by the
 * "sub pc, -2" pipeline-flush idiom.  After do_debug_priv returns,
 * the sequence is reversed to restore the banked lr and the debug
 * return registers, exiting with retd.
 */
  454. /* If we get a debug trap from privileged context we end up here */
  455. handle_debug_priv:
  456. /* Fix up LR and SP in regs. r11 contains the mode we came from */
  457. mfsr r8, SYSREG_SR
  458. mov r9, r8
  459. andh r8, hi(~MODE_MASK)
  460. or r8, r11
  461. mtsr SYSREG_SR, r8
  462. sub pc, -2
/* Now in the interrupted mode: lr is that mode's banked lr */
  463. stdsp sp[REG_LR], lr
  464. mtsr SYSREG_SR, r9
  465. sub pc, -2
  466. sub r10, sp, -FRAME_SIZE_FULL
  467. stdsp sp[REG_SP], r10
  468. mov r12, sp
  469. rcall do_debug_priv
  470. /* Now, put everything back */
/* ssrf SR_EM_BIT: mask exceptions before touching the debug regs */
  471. ssrf SR_EM_BIT
  472. popm r10, r11
  473. mtsr SYSREG_RAR_DBG, r10
  474. mtsr SYSREG_RSR_DBG, r11
  475. mfsr r8, SYSREG_SR
  476. mov r9, r8
  477. andh r8, hi(~MODE_MASK)
  478. andh r11, hi(MODE_MASK)
  479. or r8, r11
  480. mtsr SYSREG_SR, r8
  481. sub pc, -2
/* Back in the target mode just long enough to reload its banked lr */
  482. popm lr
  483. mtsr SYSREG_SR, r9
  484. sub pc, -2
  485. sub sp, -4 /* skip SP */
  486. popm r0-r12
  487. sub sp, -4
  488. retd
  489. /*
  490. * At this point, everything is masked, that is, interrupts,
  491. * exceptions and debugging traps. We might get called from
  492. * interrupt or exception context in some rare cases, but this
  493. * will be taken care of by do_debug(), so we're not going to
  494. * do a 100% correct context save here.
  495. */
/*
 * Debug trap entry.  Saves a full frame with PC/SR taken from the
 * debug return registers (RAR_DBG/RSR_DBG).  Non-zero mode bits in
 * the saved SR divert to handle_debug_priv above; otherwise do_debug
 * runs for a user-mode trap.  On return, the (possibly modified)
 * frame SR decides whether to do user-side TIF work first.
 */
  496. handle_debug:
  497. sub sp, 4 /* r12_orig */
  498. stmts --sp, r0-lr
  499. mfsr r10, SYSREG_RAR_DBG
  500. mfsr r11, SYSREG_RSR_DBG
  501. unmask_exceptions
  502. pushm r10,r11
  503. andh r11, (MODE_MASK >> 16), COH
  504. brne handle_debug_priv
  505. mov r12, sp
  506. rcall do_debug
  507. lddsp r10, sp[REG_SR]
  508. andh r10, (MODE_MASK >> 16), COH
  509. breq debug_resume_user
/* Restore PC/SR to the debug return registers and exit with retd */
  510. debug_restore_all:
  511. popm r10,r11
  512. mask_exceptions
  513. mtsr SYSREG_RSR_DBG, r11
  514. mtsr SYSREG_RAR_DBG, r10
  515. ldmts sp++, r0-lr
  516. sub sp, -4
  517. retd
/*
 * User-mode debug return: handle resched/signal work with the usual
 * mask/unmask-around-calls pattern (r0 = thread_info, r1 = flags),
 * then if TIF_SINGLE_STEP is set, set the single-step bit in the OCD
 * Development Control register before resuming.
 */
  518. debug_resume_user:
  519. get_thread_info r0
  520. mask_interrupts
  521. ld.w r1, r0[TI_flags]
  522. andl r1, _TIF_DBGWORK_MASK, COH
  523. breq debug_restore_all
  524. 1: bld r1, TIF_NEED_RESCHED
  525. brcc 2f
  526. unmask_interrupts
  527. rcall schedule
  528. mask_interrupts
  529. ld.w r1, r0[TI_flags]
  530. rjmp 1b
  531. 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
  532. tst r1, r2
  533. breq 3f
  534. unmask_interrupts
  535. mov r12, sp
  536. mov r11, r0
  537. rcall do_notify_resume
  538. mask_interrupts
  539. ld.w r1, r0[TI_flags]
  540. rjmp 1b
  541. 3: bld r1, TIF_SINGLE_STEP
  542. brcc debug_restore_all
  543. mfdr r2, DBGREG_DC
  544. sbr r2, DC_SS_BIT
  545. mtdr DBGREG_DC, r2
  546. rjmp debug_restore_all
/*
 * IRQ_LEVEL \level generates the irq_level0..irq_level3 handlers.
 * The .set aliases let the macro form the per-level return-register
 * names (rar_int\level / rsr_int\level) by token pasting.
 *
 * Each handler: full frame + saved PC/SR, then do_IRQ(level, regs).
 * The return path depends on the interrupted mode (extracted from the
 * saved SR):
 *  - user mode:       check _TIF_WORK_MASK, divert to irq_exit_work;
 *  - supervisor (2:): if TIF_CPU_GOING_TO_SLEEP is set, redirect the
 *    saved PC to cpu_idle_skip_sleep so the idle loop does not sleep
 *    through a wakeup that arrived between unmask and "sleep 0";
 *  - CONFIG_PREEMPT (3:): same guard trio as fault_resume_kernel
 *    (preempt_count == 0, TIF_NEED_RESCHED set, GM clear) before
 *    calling preempt_schedule_irq.
 * 1: is the common restore: pop PC/SR back into rar/rsr_int\level,
 * restore r0-lr, skip r12_orig, rete.
 */
  547. .set rsr_int0, SYSREG_RSR_INT0
  548. .set rsr_int1, SYSREG_RSR_INT1
  549. .set rsr_int2, SYSREG_RSR_INT2
  550. .set rsr_int3, SYSREG_RSR_INT3
  551. .set rar_int0, SYSREG_RAR_INT0
  552. .set rar_int1, SYSREG_RAR_INT1
  553. .set rar_int2, SYSREG_RAR_INT2
  554. .set rar_int3, SYSREG_RAR_INT3
  555. .macro IRQ_LEVEL level
  556. .type irq_level\level, @function
  557. irq_level\level:
  558. sub sp, 4 /* r12_orig */
  559. stmts --sp,r0-lr
  560. mfsr r8, rar_int\level
  561. mfsr r9, rsr_int\level
  562. pushm r8-r9
  563. mov r11, sp
  564. mov r12, \level
  565. rcall do_IRQ
  566. lddsp r4, sp[REG_SR]
  567. bfextu r4, r4, SYSREG_M0_OFFSET, 3
  568. cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
  569. breq 2f
  570. cp.w r4, MODE_USER >> SYSREG_M0_OFFSET
  571. #ifdef CONFIG_PREEMPT
  572. brne 3f
  573. #else
  574. brne 1f
  575. #endif
  576. get_thread_info r0
  577. ld.w r1, r0[TI_flags]
  578. andl r1, _TIF_WORK_MASK, COH
  579. brne irq_exit_work
  580. 1: popm r8-r9
  581. mtsr rar_int\level, r8
  582. mtsr rsr_int\level, r9
  583. ldmts sp++,r0-lr
  584. sub sp, -4 /* ignore r12_orig */
  585. rete
  586. 2: get_thread_info r0
  587. ld.w r1, r0[TI_flags]
  588. bld r1, TIF_CPU_GOING_TO_SLEEP
  589. #ifdef CONFIG_PREEMPT
  590. brcc 3f
  591. #else
  592. brcc 1b
  593. #endif
  594. sub r1, pc, . - cpu_idle_skip_sleep
  595. stdsp sp[REG_PC], r1
  596. #ifdef CONFIG_PREEMPT
  597. 3: get_thread_info r0
  598. ld.w r2, r0[TI_preempt_count]
  599. cp.w r2, 0
  600. brne 1b
  601. ld.w r1, r0[TI_flags]
  602. bld r1, TIF_NEED_RESCHED
  603. brcc 1b
  604. lddsp r4, sp[REG_SR]
  605. bld r4, SYSREG_GM_OFFSET
  606. brcs 1b
  607. rcall preempt_schedule_irq
  608. #endif
  609. rjmp 1b
  610. .endm
  611. .section .irq.text,"ax",@progbits
/*
 * Idle-sleep with wakeup-race protection: TIF_CPU_GOING_TO_SLEEP is
 * set (with interrupts masked) before unmasking and sleeping.  If an
 * interrupt fires in the window between unmask_interrupts and
 * "sleep 0", the irq_levelN return path sees the flag and rewrites
 * the saved PC to cpu_idle_skip_sleep, so the sleep is skipped rather
 * than missed-wakeup'd.  Returns to the caller via retal r12 —
 * presumably r12 holds the return address supplied by the idle loop;
 * confirm against the caller in the AVR32 idle code.
 */
  612. .global cpu_idle_sleep
  613. cpu_idle_sleep:
  614. mask_interrupts
  615. get_thread_info r8
  616. ld.w r9, r8[TI_flags]
/* Reschedule already pending?  Then don't sleep at all. */
  617. bld r9, TIF_NEED_RESCHED
  618. brcs cpu_idle_enable_int_and_exit
  619. sbr r9, TIF_CPU_GOING_TO_SLEEP
  620. st.w r8[TI_flags], r9
  621. unmask_interrupts
  622. sleep 0
  623. cpu_idle_skip_sleep:
  624. mask_interrupts
  625. ld.w r9, r8[TI_flags]
  626. cbr r9, TIF_CPU_GOING_TO_SLEEP
  627. st.w r8[TI_flags], r9
  628. cpu_idle_enable_int_and_exit:
  629. unmask_interrupts
  630. retal r12
/* Instantiate the four interrupt-priority-level handlers */
  631. .global irq_level0
  632. .global irq_level1
  633. .global irq_level2
  634. .global irq_level3
  635. IRQ_LEVEL 0
  636. IRQ_LEVEL 1
  637. IRQ_LEVEL 2
  638. IRQ_LEVEL 3