/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
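
/*
 * For reference, the frame built by kernel_entry corresponds to struct
 * pt_regs; the S_* offsets are generated in asm-offsets.c. A sketch,
 * assuming the v3.7-era definition in asm/ptrace.h:
 *
 *	struct pt_regs {
 *		u64 regs[31];	// x0..x30; S_X0/S_LR index into here
 *		u64 sp;		// S_SP
 *		u64 pc;		// S_PC
 *		u64 pstate;	// S_PSTATE
 *		u64 orig_x0;	// S_ORIG_X0
 *		u64 syscallno;	// S_SYSCALLNO
 *	};
 *
 * The initial "sub sp" reserves everything above regs[30] (offset S_LR);
 * the fifteen push pairs then fill regs[0..29] working downwards, leaving
 * sp pointing at the base of the pt_regs frame.
 */
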
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm
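
/*
 * Note on "ret = 1": on the fast syscall return path x0 already holds
 * the syscall return value, so instead of popping x0/x1 the macro
 * reloads only x1 from the frame and steps sp straight to S_X2,
 * leaving x0 untouched.
 */
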
	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~((1 << 13) - 1)	// top of 8K stack
	.endm
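
/*
 * Roughly the assembly equivalent of the following, assuming 8K kernel
 * stacks (THREAD_SIZE == 8192, matching the hardcoded 13-bit mask):
 *
 *	struct thread_info *ti = (void *)(sp & ~(THREAD_SIZE - 1));
 *
 * thread_info lives at the low end of the kernel stack, so masking off
 * the bottom 13 bits of sp yields its address.
 */
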
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm
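
/*
 * Each vector slot is 128 bytes wide (hence .align 7), as required by
 * the AArch64 vector table format; the table itself must be 2KB
 * aligned (.align 11 below) before its address is written to VBAR_EL1.
 */
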
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm
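
/*
 * bad_mode() is the C handler in traps.c; the three arguments set up
 * above correspond (assuming the v3.7-era prototype) to:
 *
 *	void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
 */
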
el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #26			// exception class
	cmp	x24, #0x25			// data abort in EL1
	b.eq	el1_da
	cmp	x24, #0x18			// configurable trap
	b.eq	el1_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #0x00			// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #0x30			// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
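
/*
 * The values compared above are ESR_EL1 exception classes (bits [31:26]
 * of the syndrome, hence the "lsr #26"): 0x25 is a data abort taken
 * from the current EL, 0x18 a trapped MSR/MRS/system instruction, 0x26
 * an SP alignment fault, 0x22 a PC alignment fault, 0x00 an unknown
 * reason, and 0x30 upwards are the debug exception classes.
 */
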
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg_if_not_stepping x2
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
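
/*
 * do_mem_abort() lives in mm/fault.c; the registers set up above (x0 =
 * fault address, x1 = syndrome, x2 = regs) match, assuming the
 * v3.7-era prototype:
 *
 *	void do_mem_abort(unsigned long addr, unsigned int esr,
 *			  struct pt_regs *regs);
 */
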
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort

el1_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr

el1_dbg:
	/*
	 * Debug exception handling
	 */
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x0, x24, #1			// increment it
	str	x0, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
	cbnz	x24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)
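
/*
 * el1_preempt (below) is entered with the return address stashed in
 * x24; it keeps calling preempt_schedule_irq() until TIF_NEED_RESCHED
 * is clear, since an interrupt taken during the scheduler call can set
 * the flag again.
 */
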
#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	enable_dbg
	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x15			// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x2c			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x18			// configurable trap
	b.eq	el0_undef
	cmp	x24, #0x26			// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x22			// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #26			// exception class
	cmp	x24, #0x11			// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_from_exception
	cmp	x24, #0x24			// data abort in EL0
	b.eq	el0_da
	cmp	x24, #0x20			// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #0x07			// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #0x28			// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #0x00			// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #0x30			// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked
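
/*
 * The AArch32 EABI passes the syscall number in r7, which is visible
 * here as w7; the 64-bit path (el0_svc, below) takes it in w8 instead,
 * per the AArch64 Linux syscall convention.
 */
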
	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort

el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort

el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc

el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc

el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort

el0_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr

el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	disable_step x1
	mov	x1, x25
	mov	x2, sp
	b	do_debug_exception

el0_inv:
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	disable_step x1
	isb
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x23, x24, #1			// increment it
	str	x23, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	x0, [tsk, #TI_PREEMPT]
	str	x24, [tsk, #TI_PREEMPT]
	cmp	x0, x23
	b.eq	1f
	mov	x1, #0
	str	x1, [x1]			// BUG
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
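
/*
 * The "str x1, [x1]" with x1 == 0 in the CONFIG_PREEMPT block above is
 * a deliberate NULL dereference: if the preempt count after
 * irq_handler does not match the value it was incremented to,
 * something in the handler unbalanced it, and faulting here flags the
 * bug immediately.
 */
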
/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
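
/*
 * The store/load pattern above mirrors struct cpu_context in
 * asm/processor.h; a sketch, assuming the v3.7-era layout:
 *
 *	struct cpu_context {
 *		unsigned long x19, x20, x21, x22, x23, x24;
 *		unsigned long x25, x26, x27, x28;
 *		unsigned long fp, sp, pc;
 *	};
 *
 * Note that lr is saved into the pc slot: the next thread resumes at
 * whatever return address the previous switch recorded for it.
 */
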
/*
 * This is the fast syscall return path. We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	tbz	x1, #TIF_SINGLESTEP, fast_exit
	disable_dbg
	enable_step x2
fast_exit:
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	enable_dbg
	bl	schedule

/*
 * "slow" syscall return path.
 */
ENTRY(ret_to_user)
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	tbz	x1, #TIF_SINGLESTEP, no_work_pending
	disable_dbg
	enable_step x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	disable_step x16
	isb
	enable_dbg
	enable_irq

	get_thread_info tsk
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
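
/*
 * "ldr x16, [stbl, scno, lsl #3]" indexes a table of 8-byte function
 * pointers, i.e. roughly:
 *
 *	ret = sys_call_table[scno](x0, x1, x2, x3, x4, x5);
 *
 * The up-front bounds check against sc_nr keeps a bogus syscall number
 * from reading past the end of the table.
 */
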
/*
 * This is the really slow path.  We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_execve_wrapper)
	mov	x3, sp
	b	sys_execve
ENDPROC(sys_execve_wrapper)

ENTRY(sys_clone_wrapper)
	mov	x5, sp
	b	sys_clone
ENDPROC(sys_clone_wrapper)

ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(sys_sigaltstack_wrapper)
	ldr	x2, [sp, #S_SP]
	b	sys_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

ENTRY(handle_arch_irq)
	.quad	0
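
/*
 * handle_arch_irq is not code but an 8-byte slot holding a function
 * pointer of type void (*)(struct pt_regs *). It is zero-initialised
 * here and filled in at boot by the interrupt controller setup code
 * (the GIC driver on most platforms); the irq_handler macro above
 * loads it and branches to it indirectly.
 */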