/*
 * entry-avr32b.S — low-level kernel entry points for AVR32B
 * (exception, TLB-miss, debug, interrupt and system-call handlers).
 */
  1. /*
  2. * Copyright (C) 2004-2006 Atmel Corporation
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
  8. /*
  9. * This file contains the low-level entry-points into the kernel, that is,
  10. * exception handlers, debug trap handlers, interrupt handlers and the
  11. * system call handler.
  12. */
  13. #include <linux/errno.h>
  14. #include <asm/asm.h>
  15. #include <asm/hardirq.h>
  16. #include <asm/irq.h>
  17. #include <asm/ocd.h>
  18. #include <asm/page.h>
  19. #include <asm/pgtable.h>
  20. #include <asm/ptrace.h>
  21. #include <asm/sysreg.h>
  22. #include <asm/thread_info.h>
  23. #include <asm/unistd.h>
  24. #ifdef CONFIG_PREEMPT
	/* With preemption, returning to kernel mode must keep interrupts
	 * masked while the need_resched test runs. */
  25. # define preempt_stop mask_interrupts
  26. #else
	/* Without preemption there is nothing to check on the
	 * return-to-kernel path, so it degenerates into a plain
	 * register restore. */
  27. # define preempt_stop
  28. # define fault_resume_kernel fault_restore_all
  29. #endif
	/* Combined mask of the hardirq and softirq count fields in
	 * preempt_count (non-zero => in_interrupt()). */
  30. #define __MASK(x) ((1 << (x)) - 1)
  31. #define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
  32. (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
  33. .section .ex.text,"ax",@progbits
  34. .align 2
	/*
	 * Hardware exception vector table.  The CPU enters at a fixed
	 * offset from EVBA for each exception cause; every slot is a
	 * single 'bral' to the real handler, with '.align 2' keeping
	 * each slot at its architected 4-byte-aligned offset.  Several
	 * distinct causes share a handler (e.g. the various illegal
	 * instruction causes all go to do_illegal_opcode_ll).
	 */
  35. exception_vectors:
  36. bral handle_critical
  37. .align 2
  38. bral handle_critical
  39. .align 2
  40. bral do_bus_error_write
  41. .align 2
  42. bral do_bus_error_read
  43. .align 2
  44. bral do_nmi_ll
  45. .align 2
  46. bral handle_address_fault
  47. .align 2
  48. bral handle_protection_fault
  49. .align 2
  50. bral handle_debug
  51. .align 2
  52. bral do_illegal_opcode_ll
  53. .align 2
  54. bral do_illegal_opcode_ll
  55. .align 2
  56. bral do_illegal_opcode_ll
  57. .align 2
  58. bral do_fpe_ll
  59. .align 2
  60. bral do_illegal_opcode_ll
  61. .align 2
  62. bral handle_address_fault
  63. .align 2
  64. bral handle_address_fault
  65. .align 2
  66. bral handle_protection_fault
  67. .align 2
  68. bral handle_protection_fault
  69. .align 2
  70. bral do_dtlb_modified
	/*
	 * TLB-miss fast path.  Runs entirely in exception mode with only
	 * r0-r3 saved (see tlbmiss_save/tlbmiss_restore below); no full
	 * pt_regs frame is built unless we fall through to the slow path
	 * (page_table_not_present / page_not_present).
	 *
	 * Register usage on this path:
	 */
  71. /*
  72. * r0 : PGD/PT/PTE
  73. * r1 : Offending address
  74. * r2 : Scratch register
  75. * r3 : Cause (5, 12 or 13)
  76. */
  77. #define tlbmiss_save pushm r0-r3
  78. #define tlbmiss_restore popm r0-r3
	/* Each miss type has its own tiny section so the linker script can
	 * place it at the vector offset; all three funnel into
	 * tlb_miss_common (dtlb_miss_write simply falls through). */
  79. .section .tlbx.ex.text,"ax",@progbits
  80. .global itlb_miss
  81. itlb_miss:
  82. tlbmiss_save
  83. rjmp tlb_miss_common
  84. .section .tlbr.ex.text,"ax",@progbits
  85. dtlb_miss_read:
  86. tlbmiss_save
  87. rjmp tlb_miss_common
  88. .section .tlbw.ex.text,"ax",@progbits
  89. dtlb_miss_write:
  90. tlbmiss_save
  91. .global tlb_miss_common
  92. tlb_miss_common:
	/* r0 = faulting virtual address, r1 = current page-table base */
  93. mfsr r0, SYSREG_TLBEAR
  94. mfsr r1, SYSREG_PTBR
  95. /* Is it the vmalloc space? */
	/* Addresses with bit 31 set are kernel space; look them up in
	 * swapper_pg_dir instead of the current process page table. */
  96. bld r0, 31
  97. brcs handle_vmalloc_miss
  98. /* First level lookup */
  99. pgtbl_lookup:
	/* r2 = PGD index; r3 = PGD entry; r1 = PTE index within the
	 * second-level table. */
  100. lsr r2, r0, PGDIR_SHIFT
  101. ld.w r3, r1[r2 << 2]
  102. bfextu r1, r0, PAGE_SHIFT, PGDIR_SHIFT - PAGE_SHIFT
  103. bld r3, _PAGE_BIT_PRESENT
  104. brcc page_table_not_present
  105. /* Translate to virtual address in P1. */
	/* Clear the low flag bits and set bit 31 to turn the physical
	 * page address into a P1-segment virtual address. */
  106. andl r3, 0xf000
  107. sbr r3, 31
  108. /* Second level lookup */
  109. ld.w r2, r3[r1 << 2]
  110. mfsr r0, SYSREG_TLBARLO
  111. bld r2, _PAGE_BIT_PRESENT
  112. brcc page_not_present
  113. /* Mark the page as accessed */
  114. sbr r2, _PAGE_BIT_ACCESSED
  115. st.w r3[r1 << 2], r2
  116. /* Drop software flags */
  117. andl r2, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
  118. mtsr SYSREG_TLBELO, r2
  119. /* Figure out which entry we want to replace */
	/* Pick a victim from TLBARLO via count-leading-zeros; when no
	 * candidate bit is found (carry clear), every entry has been
	 * accessed, so reset the accessed bits and restart at entry 0. */
  120. mfsr r1, SYSREG_MMUCR
  121. clz r2, r0
  122. brcc 1f
  123. mov r3, -1 /* All entries have been accessed, */
  124. mov r2, 0 /* so start at 0 */
  125. mtsr SYSREG_TLBARLO, r3 /* and reset TLBAR */
	/* Write the victim index into MMUCR[DRP], then commit the new
	 * entry with tlbw and return from the exception. */
  126. 1: bfins r1, r2, SYSREG_DRP_OFFSET, SYSREG_DRP_SIZE
  127. mtsr SYSREG_MMUCR, r1
  128. tlbw
  129. tlbmiss_restore
  130. rete
  131. handle_vmalloc_miss:
  132. /* Simply do the lookup in init's page table */
  133. mov r1, lo(swapper_pg_dir)
  134. orh r1, hi(swapper_pg_dir)
  135. rjmp pgtbl_lookup
  136. /* --- System Call --- */
	/*
	 * System-call entry.  Builds a full pt_regs frame (r12_orig slot,
	 * r0-lr via stmts, then the return PC/SR from RAR_SUP/RSR_SUP),
	 * dispatches through sys_call_table indexed by the syscall number
	 * in r8, and returns with 'rets'.  The syscall return value is
	 * written back into the saved-r12 slot so the register-restore
	 * path reloads it naturally.
	 */
  137. .section .scall.text,"ax",@progbits
  138. system_call:
  139. pushm r12 /* r12_orig */
  140. stmts --sp, r0-lr
  141. zero_fp
  142. mfsr r0, SYSREG_RAR_SUP
  143. mfsr r1, SYSREG_RSR_SUP
  144. stm --sp, r0-r1
  145. /* check for syscall tracing */
  146. get_thread_info r0
  147. ld.w r1, r0[TI_flags]
  148. bld r1, TIF_SYSCALL_TRACE
  149. brcs syscall_trace_enter
  150. syscall_trace_cont:
	/* Unsigned bounds check on the syscall number in r8. */
  151. cp.w r8, NR_syscalls
  152. brhs syscall_badsys
  153. lddpc lr, syscall_table_addr
  154. ld.w lr, lr[r8 << 2]
  155. mov r8, r5 /* 5th argument (6th is pushed by stub) */
  156. icall lr
  157. .global syscall_return
  158. syscall_return:
  159. get_thread_info r0
  160. mask_interrupts /* make sure we don't miss an interrupt
  161. setting need_resched or sigpending
  162. between sampling and the rets */
  163. /* Store the return value so that the correct value is loaded below */
  164. stdsp sp[REG_R12], r12
  165. ld.w r1, r0[TI_flags]
  166. andl r1, _TIF_ALLWORK_MASK, COH
  167. brne syscall_exit_work
  168. syscall_exit_cont:
	/* Restore return PC/SR, then all registers, and return to the
	 * caller in the mode recorded in RSR_SUP. */
  169. popm r8-r9
  170. mtsr SYSREG_RAR_SUP, r8
  171. mtsr SYSREG_RSR_SUP, r9
  172. ldmts sp++, r0-lr
  173. sub sp, -4 /* r12_orig */
  174. rets
  175. .align 2
  176. syscall_table_addr:
  177. .long sys_call_table
  178. syscall_badsys:
	/* Out-of-range syscall number: fail with -ENOSYS. */
  179. mov r12, -ENOSYS
  180. rjmp syscall_return
  181. .global ret_from_fork
	/* First return of a newly forked task: finish the context switch
	 * in schedule_tail(), then join the normal syscall exit path. */
  182. ret_from_fork:
  183. rcall schedule_tail
  184. /* check for syscall tracing */
  185. get_thread_info r0
  186. ld.w r1, r0[TI_flags]
  187. andl r1, _TIF_ALLWORK_MASK, COH
  188. brne syscall_exit_work
  189. rjmp syscall_exit_cont
  190. syscall_trace_enter:
	/* Preserve the syscall number and arguments around the tracer. */
  191. pushm r8-r12
  192. rcall syscall_trace
  193. popm r8-r12
  194. rjmp syscall_trace_cont
	/*
	 * Slow syscall-exit path: loop handling tracing, reschedule and
	 * signal work (re-sampling TI_flags with interrupts masked each
	 * time) until no work remains, then fall back to
	 * syscall_exit_cont.
	 */
  195. syscall_exit_work:
  196. bld r1, TIF_SYSCALL_TRACE
  197. brcc 1f
  198. unmask_interrupts
  199. rcall syscall_trace
  200. mask_interrupts
  201. ld.w r1, r0[TI_flags]
  202. 1: bld r1, TIF_NEED_RESCHED
  203. brcc 2f
  204. unmask_interrupts
  205. rcall schedule
  206. mask_interrupts
  207. ld.w r1, r0[TI_flags]
  208. rjmp 1b
  209. 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
  210. tst r1, r2
  211. breq 3f
  212. unmask_interrupts
  213. mov r12, sp
  214. mov r11, r0
  215. rcall do_notify_resume
  216. mask_interrupts
  217. ld.w r1, r0[TI_flags]
  218. rjmp 1b
  219. 3: bld r1, TIF_BREAKPOINT
  220. brcc syscall_exit_cont
	/* TIF_BREAKPOINT: arm OCD breakpoint 2A on the return PC,
	 * qualified by the current ASID from TLBEHI, before returning
	 * to user space. */
  221. mfsr r3, SYSREG_TLBEHI
  222. lddsp r2, sp[REG_PC]
  223. andl r3, 0xff, COH
  224. lsl r3, 1
  225. sbr r3, 30
  226. sbr r3, 0
  227. mtdr DBGREG_BWA2A, r2
  228. mtdr DBGREG_BWC2A, r3
  229. rjmp syscall_exit_cont
  230. /* The slow path of the TLB miss handler */
	/* Undo the minimal r0-r3 save done by the fast path, build a full
	 * pt_regs frame instead, and hand off to the C page-fault
	 * handler with the exception cause (ECR) in r12. */
  231. page_table_not_present:
  232. page_not_present:
  233. tlbmiss_restore
  234. sub sp, 4
  235. stmts --sp, r0-lr
  236. rcall save_full_context_ex
  237. mfsr r12, SYSREG_ECR
  238. mov r11, sp
  239. rcall do_page_fault
  240. rjmp ret_from_exception
  241. /* This function expects to find offending PC in SYSREG_RAR_EX */
	/*
	 * Complete the pt_regs frame for an exception: push the faulting
	 * PC (RAR_EX) and SR (RSR_EX) on top of the already-saved
	 * registers and re-enable exceptions.  If the exception came
	 * from a privileged mode (MODE bits non-zero), the SP saved by
	 * stmts is the exception-mode SP, so first rewrite the saved SP
	 * slot with the pre-exception value.  Returns with RSR_EX in r12.
	 */
  242. save_full_context_ex:
  243. mfsr r8, SYSREG_RSR_EX
  244. mov r12, r8
  245. andh r8, (MODE_MASK >> 16), COH
  246. mfsr r11, SYSREG_RAR_EX
  247. brne 2f
  248. 1: pushm r11, r12 /* PC and SR */
  249. unmask_exceptions
  250. ret r12
  251. 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
  252. stdsp sp[4], r10 /* replace saved SP */
  253. rjmp 1b
  254. /* Low-level exception handlers */
	/* Unrecoverable/critical exceptions: save everything and call the
	 * C handler, which is not expected to return. */
  255. handle_critical:
  256. pushm r12
  257. pushm r0-r12
  258. rcall save_full_context_ex
  259. mfsr r12, SYSREG_ECR
  260. mov r11, sp
  261. rcall do_critical_exception
  262. /* We should never get here... */
  263. bad_return:
	/* PC-relative computation of the address of the message at 1:
	 * below, passed to panic() in r12. */
  264. sub r12, pc, (. - 1f)
  265. bral panic
  266. .align 2
  267. 1: .asciz "Return from critical exception!"
  268. .align 1
	/* Bus error handlers: identical save path; r11 distinguishes
	 * write (1) from read (0), and r12 carries the bus error
	 * address from BEAR. */
  269. do_bus_error_write:
  270. sub sp, 4
  271. stmts --sp, r0-lr
  272. rcall save_full_context_ex
  273. mov r11, 1
  274. rjmp 1f
  275. do_bus_error_read:
  276. sub sp, 4
  277. stmts --sp, r0-lr
  278. rcall save_full_context_ex
  279. mov r11, 0
  280. 1: mfsr r12, SYSREG_BEAR
  281. mov r10, sp
  282. rcall do_bus_error
  283. rjmp ret_from_exception
  284. .align 1
	/*
	 * Non-maskable interrupt.  Saves its own frame using RAR_NMI /
	 * RSR_NMI (exceptions stay masked; save_full_context_ex is not
	 * used).  r0 holds the pre-NMI mode bits: zero means we
	 * interrupted application mode (restore everything with ldmts),
	 * non-zero means a privileged mode, where the saved SP slot must
	 * first be fixed up (2:) and the restore must not touch SP (3:).
	 */
  285. do_nmi_ll:
  286. sub sp, 4
  287. stmts --sp, r0-lr
  288. mfsr r9, SYSREG_RSR_NMI
  289. mfsr r8, SYSREG_RAR_NMI
  290. bfextu r0, r9, MODE_SHIFT, 3
  291. brne 2f
  292. 1: pushm r8, r9 /* PC and SR */
  293. mfsr r12, SYSREG_ECR
  294. mov r11, sp
  295. rcall do_nmi
  296. popm r8-r9
  297. mtsr SYSREG_RAR_NMI, r8
  298. tst r0, r0
  299. mtsr SYSREG_RSR_NMI, r9
  300. brne 3f
  301. ldmts sp++, r0-lr
  302. sub sp, -4 /* skip r12_orig */
  303. rete
  304. 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
  305. stdsp sp[4], r10 /* replace saved SP */
  306. rjmp 1b
  307. 3: popm lr
  308. sub sp, -4 /* skip sp */
  309. popm r0-r12
  310. sub sp, -4 /* skip r12_orig */
  311. rete
	/* The following three stubs share one shape: build a full frame,
	 * pass the exception cause (ECR) in r12 and pt_regs in r11 to
	 * the C handler, then take the common exception-return path. */
  312. handle_address_fault:
  313. sub sp, 4
  314. stmts --sp, r0-lr
  315. rcall save_full_context_ex
  316. mfsr r12, SYSREG_ECR
  317. mov r11, sp
  318. rcall do_address_exception
  319. rjmp ret_from_exception
  320. handle_protection_fault:
  321. sub sp, 4
  322. stmts --sp, r0-lr
  323. rcall save_full_context_ex
  324. mfsr r12, SYSREG_ECR
  325. mov r11, sp
  326. rcall do_page_fault
  327. rjmp ret_from_exception
  328. .align 1
  329. do_illegal_opcode_ll:
  330. sub sp, 4
  331. stmts --sp, r0-lr
  332. rcall save_full_context_ex
  333. mfsr r12, SYSREG_ECR
  334. mov r11, sp
  335. rcall do_illegal_opcode
  336. rjmp ret_from_exception
	/*
	 * DTLB-modified exception: first write to a clean page.  Walks
	 * the page table for the faulting address (TLBEAR), sets the
	 * dirty bit in the PTE, and rewrites the TLB entry.  Runs
	 * entirely in exception mode with only r0-r3 saved.
	 */
  337. do_dtlb_modified:
  338. pushm r0-r3
  339. mfsr r1, SYSREG_TLBEAR
  340. mfsr r0, SYSREG_PTBR
	/* r0 = PGD entry for the fault address; r1 = PTE index
	 * (middle bits of the address isolated by the shift pair). */
  341. lsr r2, r1, PGDIR_SHIFT
  342. ld.w r0, r0[r2 << 2]
  343. lsl r1, (32 - PGDIR_SHIFT)
  344. lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT
  345. /* Translate to virtual address in P1 */
  346. andl r0, 0xf000
  347. sbr r0, 31
	/* r2 = &PTE; set the dirty bit and store the PTE back. */
  348. add r2, r0, r1 << 2
  349. ld.w r3, r2[0]
  350. sbr r3, _PAGE_BIT_DIRTY
  351. mov r0, r3
  352. st.w r2[0], r3
  353. /* The page table is up-to-date. Update the TLB entry as well */
  354. andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
  355. mtsr SYSREG_TLBELO, r0
  356. /* MMUCR[DRP] is updated automatically, so let's go... */
  357. tlbw
  358. popm r0-r3
  359. rete
	/* Floating-point exception: full frame, then do_fpe() with a
	 * hard-coded cause code 26 in r12 and pt_regs in r11. */
  360. do_fpe_ll:
  361. sub sp, 4
  362. stmts --sp, r0-lr
  363. rcall save_full_context_ex
  364. unmask_interrupts
  365. mov r12, 26
  366. mov r11, sp
  367. rcall do_fpe
  368. rjmp ret_from_exception
	/*
	 * Common exception-return path.  With interrupts masked, decide
	 * from the saved SR whether we are returning to kernel mode
	 * (fault_resume_kernel, including the optional preemption check)
	 * or to user mode, where pending work flags are handled first
	 * (fault_exit_work).
	 */
  369. ret_from_exception:
  370. mask_interrupts
  371. lddsp r4, sp[REG_SR]
  372. andh r4, (MODE_MASK >> 16), COH
  373. brne fault_resume_kernel
  374. get_thread_info r0
  375. ld.w r1, r0[TI_flags]
  376. andl r1, _TIF_WORK_MASK, COH
  377. brne fault_exit_work
  378. fault_resume_user:
	/* Restore PC/SR into RAR_EX/RSR_EX with exceptions masked, then
	 * restore user registers and rete back to user space. */
  379. popm r8-r9
  380. mask_exceptions
  381. mtsr SYSREG_RAR_EX, r8
  382. mtsr SYSREG_RSR_EX, r9
  383. ldmts sp++, r0-lr
  384. sub sp, -4
  385. rete
  386. fault_resume_kernel:
  387. #ifdef CONFIG_PREEMPT
	/* Preempt only if preempt_count is zero, need_resched is set and
	 * the interrupted context did not have interrupts globally
	 * masked (GM bit in the saved SR). */
  388. get_thread_info r0
  389. ld.w r2, r0[TI_preempt_count]
  390. cp.w r2, 0
  391. brne 1f
  392. ld.w r1, r0[TI_flags]
  393. bld r1, TIF_NEED_RESCHED
  394. brcc 1f
  395. lddsp r4, sp[REG_SR]
  396. bld r4, SYSREG_GM_OFFSET
  397. brcs 1f
  398. rcall preempt_schedule_irq
  399. 1:
  400. #endif
	/* Kernel-mode restore: skip the saved SP and r12_orig slots
	 * instead of reloading them. */
  401. popm r8-r9
  402. mask_exceptions
  403. mfsr r1, SYSREG_SR
  404. mtsr SYSREG_RAR_EX, r8
  405. mtsr SYSREG_RSR_EX, r9
  406. popm lr
  407. sub sp, -4 /* ignore SP */
  408. popm r0-r12
  409. sub sp, -4 /* ignore r12_orig */
  410. rete
  411. irq_exit_work:
  412. /* Switch to exception mode so that we can share the same code. */
  413. mfsr r8, SYSREG_SR
  414. cbr r8, SYSREG_M0_OFFSET
  415. orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
  416. mtsr SYSREG_SR, r8
	/* 'sub pc, -2' jumps to the next instruction; used here to
	 * serialize the pipeline after the SR mode change. */
  417. sub pc, -2
  418. get_thread_info r0
  419. ld.w r1, r0[TI_flags]
	/*
	 * Handle pending user-return work (reschedule, signals,
	 * breakpoint), re-sampling TI_flags with interrupts masked after
	 * each action, until nothing is left and we can resume user mode.
	 */
  420. fault_exit_work:
  421. bld r1, TIF_NEED_RESCHED
  422. brcc 1f
  423. unmask_interrupts
  424. rcall schedule
  425. mask_interrupts
  426. ld.w r1, r0[TI_flags]
  427. rjmp fault_exit_work
  428. 1: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
  429. tst r1, r2
  430. breq 2f
  431. unmask_interrupts
  432. mov r12, sp
  433. mov r11, r0
  434. rcall do_notify_resume
  435. mask_interrupts
  436. ld.w r1, r0[TI_flags]
  437. rjmp fault_exit_work
  438. 2: bld r1, TIF_BREAKPOINT
  439. brcc fault_resume_user
	/* TIF_BREAKPOINT: arm OCD breakpoint 2A on the return PC,
	 * qualified by the current ASID from TLBEHI. */
  440. mfsr r3, SYSREG_TLBEHI
  441. lddsp r2, sp[REG_PC]
  442. andl r3, 0xff, COH
  443. lsl r3, 1
  444. sbr r3, 30
  445. sbr r3, 0
  446. mtdr DBGREG_BWA2A, r2
  447. mtdr DBGREG_BWC2A, r3
  448. rjmp fault_resume_user
  449. /* If we get a debug trap from privileged context we end up here */
	/*
	 * Debug trap taken from a privileged mode.  The LR and SP saved
	 * by handle_debug belong to debug mode, so we briefly switch SR
	 * back to the interrupted mode (the 'sub pc, -2' after each mtsr
	 * serializes the pipeline) to read that mode's LR/SP and patch
	 * them into the frame before calling do_debug_priv().  On return
	 * the same dance runs in reverse, and 'retd' leaves debug mode.
	 */
  450. handle_debug_priv:
  451. /* Fix up LR and SP in regs. r11 contains the mode we came from */
  452. mfsr r8, SYSREG_SR
  453. mov r9, r8
  454. andh r8, hi(~MODE_MASK)
  455. or r8, r11
  456. mtsr SYSREG_SR, r8
  457. sub pc, -2
  458. stdsp sp[REG_LR], lr
  459. mtsr SYSREG_SR, r9
  460. sub pc, -2
  461. sub r10, sp, -FRAME_SIZE_FULL
  462. stdsp sp[REG_SP], r10
  463. mov r12, sp
  464. rcall do_debug_priv
  465. /* Now, put everything back */
  466. ssrf SR_EM_BIT
  467. popm r10, r11
  468. mtsr SYSREG_RAR_DBG, r10
  469. mtsr SYSREG_RSR_DBG, r11
  470. mfsr r8, SYSREG_SR
  471. mov r9, r8
  472. andh r8, hi(~MODE_MASK)
  473. andh r11, hi(MODE_MASK)
  474. or r8, r11
  475. mtsr SYSREG_SR, r8
  476. sub pc, -2
  477. popm lr
  478. mtsr SYSREG_SR, r9
  479. sub pc, -2
  480. sub sp, -4 /* skip SP */
  481. popm r0-r12
  482. sub sp, -4
  483. retd
  484. /*
  485. * At this point, everything is masked, that is, interrupts,
  486. * exceptions and debugging traps. We might get called from
  487. * interrupt or exception context in some rare cases, but this
  488. * will be taken care of by do_debug(), so we're not going to
  489. * do a 100% correct context save here.
  490. */
	/* Debug trap entry: save a frame with RAR_DBG/RSR_DBG as PC/SR.
	 * Traps from privileged modes divert to handle_debug_priv. */
  491. handle_debug:
  492. sub sp, 4 /* r12_orig */
  493. stmts --sp, r0-lr
  494. mfsr r10, SYSREG_RAR_DBG
  495. mfsr r11, SYSREG_RSR_DBG
  496. unmask_exceptions
  497. pushm r10,r11
  498. andh r11, (MODE_MASK >> 16), COH
  499. brne handle_debug_priv
  500. mov r12, sp
  501. rcall do_debug
	/* do_debug() may have modified the saved SR; re-check which mode
	 * we are returning to. */
  502. lddsp r10, sp[REG_SR]
  503. andh r10, (MODE_MASK >> 16), COH
  504. breq debug_resume_user
  505. debug_restore_all:
  506. popm r10,r11
  507. mask_exceptions
  508. mtsr SYSREG_RSR_DBG, r11
  509. mtsr SYSREG_RAR_DBG, r10
  510. ldmts sp++, r0-lr
  511. sub sp, -4
  512. retd
	/* Returning to user mode from a debug trap: handle pending
	 * reschedule/signal work, then honor TIF_SINGLE_STEP by setting
	 * the single-step bit in the OCD Development Control register. */
  513. debug_resume_user:
  514. get_thread_info r0
  515. mask_interrupts
  516. ld.w r1, r0[TI_flags]
  517. andl r1, _TIF_DBGWORK_MASK, COH
  518. breq debug_restore_all
  519. 1: bld r1, TIF_NEED_RESCHED
  520. brcc 2f
  521. unmask_interrupts
  522. rcall schedule
  523. mask_interrupts
  524. ld.w r1, r0[TI_flags]
  525. rjmp 1b
  526. 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
  527. tst r1, r2
  528. breq 3f
  529. unmask_interrupts
  530. mov r12, sp
  531. mov r11, r0
  532. rcall do_notify_resume
  533. mask_interrupts
  534. ld.w r1, r0[TI_flags]
  535. rjmp 1b
  536. 3: bld r1, TIF_SINGLE_STEP
  537. brcc debug_restore_all
  538. mfdr r2, DBGREG_DC
  539. sbr r2, DC_SS_BIT
  540. mtdr DBGREG_DC, r2
  541. rjmp debug_restore_all
	/* Lower-case aliases so the IRQ_LEVEL macro below can build the
	 * per-level RAR/RSR system-register names with \level. */
  542. .set rsr_int0, SYSREG_RSR_INT0
  543. .set rsr_int1, SYSREG_RSR_INT1
  544. .set rsr_int2, SYSREG_RSR_INT2
  545. .set rsr_int3, SYSREG_RSR_INT3
  546. .set rar_int0, SYSREG_RAR_INT0
  547. .set rar_int1, SYSREG_RAR_INT1
  548. .set rar_int2, SYSREG_RAR_INT2
  549. .set rar_int3, SYSREG_RAR_INT3
	/*
	 * Template for the four interrupt-priority entry points
	 * (irq_level0..3).  Saves a full frame with the per-level
	 * RAR/RSR as PC/SR, calls do_IRQ(level, regs), then decides the
	 * return path from the interrupted mode: user mode checks
	 * _TIF_WORK_MASK (irq_exit_work), supervisor mode checks for the
	 * cpu_idle sleep race (2:), and with CONFIG_PREEMPT kernel mode
	 * may preempt (3:).  1: is the plain restore-and-rete path.
	 */
  550. .macro IRQ_LEVEL level
  551. .type irq_level\level, @function
  552. irq_level\level:
  553. sub sp, 4 /* r12_orig */
  554. stmts --sp,r0-lr
  555. mfsr r8, rar_int\level
  556. mfsr r9, rsr_int\level
  557. pushm r8-r9
  558. mov r11, sp
  559. mov r12, \level
  560. rcall do_IRQ
  561. lddsp r4, sp[REG_SR]
  562. bfextu r4, r4, SYSREG_M0_OFFSET, 3
  563. cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
  564. breq 2f
  565. cp.w r4, MODE_USER >> SYSREG_M0_OFFSET
  566. #ifdef CONFIG_PREEMPT
  567. brne 3f
  568. #else
  569. brne 1f
  570. #endif
  571. get_thread_info r0
  572. ld.w r1, r0[TI_flags]
  573. andl r1, _TIF_WORK_MASK, COH
  574. brne irq_exit_work
  575. 1: popm r8-r9
  576. mtsr rar_int\level, r8
  577. mtsr rsr_int\level, r9
  578. ldmts sp++,r0-lr
  579. sub sp, -4 /* ignore r12_orig */
  580. rete
	/* Interrupted supervisor mode: if the task was about to sleep
	 * (TIF_CPU_GOING_TO_SLEEP), redirect the saved PC to
	 * cpu_idle_skip_sleep so the sleep is skipped on return. */
  581. 2: get_thread_info r0
  582. ld.w r1, r0[TI_flags]
  583. bld r1, TIF_CPU_GOING_TO_SLEEP
  584. #ifdef CONFIG_PREEMPT
  585. brcc 3f
  586. #else
  587. brcc 1b
  588. #endif
  589. sub r1, pc, . - cpu_idle_skip_sleep
  590. stdsp sp[REG_PC], r1
  591. #ifdef CONFIG_PREEMPT
	/* Preempt only when preempt_count is zero, need_resched is set
	 * and the interrupted context had interrupts enabled (GM clear
	 * in the saved SR). */
  592. 3: get_thread_info r0
  593. ld.w r2, r0[TI_preempt_count]
  594. cp.w r2, 0
  595. brne 1b
  596. ld.w r1, r0[TI_flags]
  597. bld r1, TIF_NEED_RESCHED
  598. brcc 1b
  599. lddsp r4, sp[REG_SR]
  600. bld r4, SYSREG_GM_OFFSET
  601. brcs 1b
  602. rcall preempt_schedule_irq
  603. #endif
  604. rjmp 1b
  605. .endm
  606. .section .irq.text,"ax",@progbits
  607. .global cpu_idle_sleep
	/*
	 * Race-free idle sleep.  With interrupts masked, bail out if a
	 * reschedule is already pending; otherwise set
	 * TIF_CPU_GOING_TO_SLEEP, re-enable interrupts and sleep.  An
	 * interrupt arriving in the window sees the flag and redirects
	 * the return PC to cpu_idle_skip_sleep (see the IRQ_LEVEL
	 * macro), which clears the flag instead of sleeping.
	 */
  608. cpu_idle_sleep:
  609. mask_interrupts
  610. get_thread_info r8
  611. ld.w r9, r8[TI_flags]
  612. bld r9, TIF_NEED_RESCHED
  613. brcs cpu_idle_enable_int_and_exit
  614. sbr r9, TIF_CPU_GOING_TO_SLEEP
  615. st.w r8[TI_flags], r9
  616. unmask_interrupts
  617. sleep 0
  618. cpu_idle_skip_sleep:
  619. mask_interrupts
  620. ld.w r9, r8[TI_flags]
  621. cbr r9, TIF_CPU_GOING_TO_SLEEP
  622. st.w r8[TI_flags], r9
  623. cpu_idle_enable_int_and_exit:
  624. unmask_interrupts
  625. retal r12
	/* Instantiate the four interrupt-priority entry points. */
  626. .global irq_level0
  627. .global irq_level1
  628. .global irq_level2
  629. .global irq_level3
  630. IRQ_LEVEL 0
  631. IRQ_LEVEL 1
  632. IRQ_LEVEL 2
  633. IRQ_LEVEL 3