entry.S

/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Bad Abort numbers
 * -----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
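
/*
 * The frame built by kernel_entry is what the C handlers receive as their
 * struct pt_regs pointer. Roughly (illustrative; see asm/ptrace.h for the
 * authoritative layout, from which the S_* offsets are generated):
 *
 *	struct pt_regs {
 *		u64	regs[31];	// x0..x30 (x30 == lr)
 *		u64	sp;		// aborted SP
 *		u64	pc;		// copied from ELR_EL1
 *		u64	pstate;		// copied from SPSR_EL1
 *		u64	orig_x0;
 *		u64	syscallno;
 *	};
 */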
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm
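
/*
 * With \ret == 1 the x0/x1 stack slots are not popped: x1 is reloaded from
 * the saved frame and sp is simply advanced past both slots to S_X2, so the
 * syscall return value already live in x0 survives the exit path.
 */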
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~((1 << 13) - 1)	// top of 8K stack
	.endm
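
/*
 * thread_info lives at the bottom of the 8K (THREAD_SIZE) kernel stack, so
 * it can be derived from the stack pointer alone. A rough C equivalent of
 * the macro above:
 *
 *	static inline struct thread_info *current_thread_info(void)
 *	{
 *		register unsigned long sp asm ("sp");
 *		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 *	}
 */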
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm
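
/*
 * The ldr above is a literal load from the handle_arch_irq slot at the end
 * of this file: it fetches a function pointer installed at boot and makes an
 * indirect call with the pt_regs frame as the only argument, roughly:
 *
 *	void (*handle_arch_irq)(struct pt_regs *);
 *	handle_arch_irq(regs);			// x0 = sp = pt_regs
 */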
	.text

/*
 * Exception vectors.
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
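
/*
 * Each ventry is padded to 128 bytes (.align 7) and the whole table is
 * 2KB-aligned (.align 11) as required for VBAR_EL1. The hardware indexes it
 * as four groups of four entries:
 *
 *	vector address = VBAR_EL1 + group * 0x200 + type * 0x80
 *
 * where group selects the origin (EL1t, EL1h, 64-bit EL0, 32-bit EL0) and
 * type selects Synchronous/IRQ/FIQ/SError, matching the order above.
 */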
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize		// macro args need the backslash
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm
el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)
/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #26			// exception class
	cmp	x24, #0x25			// data abort in EL1
	b.eq	el1_da
	cmp	x24, #0x18			// configurable trap
	b.eq	el1_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x00			// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #0x30			// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
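
	/*
	 * The exception class (EC) is bits [31:26] of ESR_EL1, hence the
	 * lsr #26 above. Worked example: a kernel data abort might report
	 * ESR_EL1 = 0x96000045, and 0x96000045 >> 26 = 0x25, which routes
	 * to el1_da; the remaining ISS bits (fault status, write/read) are
	 * decoded later by the C fault handler.
	 */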
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg_if_not_stepping x2
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	mrs	x1, esr_el1			// syndrome; x25 is not set on the EL1 path
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr

el1_dbg:
	/*
	 * Debug exception handling
	 */
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)
	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x0, x24, #1			// increment it
	str	x0, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
	cbnz	x24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)
#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	enable_dbg
	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
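
/*
 * The 1b loop re-checks TIF_NEED_RESCHED because preempt_schedule_irq()
 * runs with interrupts enabled, so a new resched request may already be
 * pending by the time it returns.
 */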
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x15			// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x2c			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x18			// configurable trap
	b.eq	el0_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x11			// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x28			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif
el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	disable_step x1
	mov	x1, x25
	mov	x2, sp
	b	do_debug_exception
el0_inv:
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)
	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	disable_step x1
	isb
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x23, x24, #1			// increment it
	str	x23, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	x0, [tsk, #TI_PREEMPT]
	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
	cmp	x0, x23				// unbalanced preempt count?
	b.eq	1f
	mov	x1, #0				// deliberate NULL dereference to
	str	x1, [x1]			// flag the bug (BUG)
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_exception)
/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
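
/*
 * The stores/loads above walk a struct cpu_context embedded in each task's
 * thread_struct at offset THREAD_CPU_CONTEXT, roughly (see asm/processor.h):
 *
 *	struct cpu_context {
 *		unsigned long x19, x20, x21, x22, x23,
 *			      x24, x25, x26, x27, x28;
 *		unsigned long fp;	// x29
 *		unsigned long sp;
 *		unsigned long pc;	// saved lr, so ret resumes the new task
 *	};
 */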
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	tbz	x1, #TIF_SINGLESTEP, fast_exit
	disable_dbg
	enable_step x2
fast_exit:
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	enable_dbg
	bl	schedule

/*
 * "slow" syscall return path.
 */
ENTRY(ret_to_user)
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	tbz	x1, #TIF_SINGLESTEP, no_work_pending
	disable_dbg
	enable_step x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)
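
/*
 * The ret_to_user/work_pending flow is, in rough C form (illustrative only;
 * the real code re-disables interrupts on each pass, as above):
 *
 *	for (;;) {
 *		local_irq_disable();
 *		flags = current_thread_info()->flags;
 *		if (!(flags & _TIF_WORK_MASK))
 *			break;			// kernel_exit back to EL0
 *		if (flags & _TIF_NEED_RESCHED)
 *			schedule();
 *		else
 *			do_notify_resume(regs, flags);	// signals etc.
 *	}
 */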
/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adr	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	disable_step x16
	isb
	enable_dbg
	enable_irq

	get_thread_info tsk
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
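
/*
 * The table lookup above is plain pointer arithmetic: lsl #3 scales the
 * syscall number by sizeof(void *). In rough C terms (illustrative):
 *
 *	long (*fn)(long, long, long, long, long, long);
 *	if (scno >= sc_nr)
 *		return do_ni_syscall(regs);
 *	fn = sys_call_table[scno];	// ldr x16, [stbl, scno, lsl #3]
 *	return fn(x0, x1, x2, x3, x4, x5);
 */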
/*
 * This is the really slow path.  We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user
/*
 * Special system call wrappers.
 */
ENTRY(sys_execve_wrapper)
	mov	x3, sp
	b	sys_execve
ENDPROC(sys_execve_wrapper)

ENTRY(sys_clone_wrapper)
	mov	x5, sp
	b	sys_clone
ENDPROC(sys_clone_wrapper)

ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(sys_sigaltstack_wrapper)
	ldr	x2, [sp, #S_SP]
	b	sys_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)
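
/*
 * Pointer-sized storage for the root IRQ handler called by irq_handler
 * above; it starts out zero and must be filled in by the interrupt
 * controller initialisation code before the first interrupt is taken.
 */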
ENTRY(handle_arch_irq)
	.quad	0