entry.S

/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
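
/*
 * The S_* offsets used above come from asm-offsets.c and describe the
 * struct pt_regs frame that kernel_entry builds on the kernel stack:
 * x0-x29 pushed in pairs at the bottom, then x30 (LR) and the aborted SP
 * at S_LR, and ELR/SPSR at S_PC. kernel_exit below unwinds the exact same
 * layout in reverse.
 */
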
	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to kernel
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm
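
/*
 * get_thread_info works because kernel stacks are THREAD_SIZE bytes and
 * THREAD_SIZE aligned, with struct thread_info placed at the lowest
 * address of the stack area, so rounding the current SP down recovers
 * the thread_info pointer directly.
 */
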
/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm
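
	/*
	 * handle_arch_irq is the function pointer stored at the end of this
	 * file (the .quad at the bottom); it is filled in during boot by the
	 * interrupt controller driver, typically the GIC code, and called
	 * here with a pointer to the pt_regs frame in x0.
	 */
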
	.text

/*
 * Exception vectors.
 */
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)
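
/*
 * The architecture requires VBAR_EL1 to be 2KB aligned (hence .align 11)
 * and gives each of the 16 vectors 128 bytes of space; the ventry macro,
 * pulled in via the includes above, provides the per-entry alignment and
 * the branch to the named handler.
 */
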
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg_if_not_stepping x2
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1

el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort

el1_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	b	do_undefinstr

el1_dbg:
	/*
	 * Debug exception handling
	 */
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception

	kernel_exit 1

el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x0, x24, #1			// increment it
	str	x0, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
	cbnz	x24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	enable_dbg
	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif
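
/*
 * The loop above re-reads TI_FLAGS after each preempt_schedule_irq() call
 * because another interrupt may have set TIF_NEED_RESCHED again before we
 * get the chance to return to the interrupted context.
 */
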
/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_from_exception
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_from_exception
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	bic	x0, x0, #(0xff << 56)
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort

el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort

el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc

el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc

el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	disable_step x1
	isb
	enable_dbg
	// enable interrupts before calling the main handler
	enable_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort

el0_undef:
	/*
	 * Undefined instruction
	 */
	mov	x0, sp
	// enable interrupts before calling the main handler
	enable_irq
	b	do_undefinstr

el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	disable_step x1
	mov	x1, x25
	mov	x2, sp
	b	do_debug_exception

el0_inv:
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	disable_step x1
	isb
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	get_thread_info tsk
#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
	add	x23, x24, #1			// increment it
	str	x23, [tsk, #TI_PREEMPT]
#endif
	irq_handler
#ifdef CONFIG_PREEMPT
	ldr	x0, [tsk, #TI_PREEMPT]
	str	x24, [tsk, #TI_PREEMPT]
	cmp	x0, x23
	b.eq	1f
	mov	x1, #0
	str	x1, [x1]			// BUG
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)
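
/*
 * The CONFIG_PREEMPT block above compares the preempt count after
 * irq_handler with the value it incremented to beforehand; a mismatch
 * means the handler leaked a preempt count change, and the deliberate
 * NULL store (str x1, [x1] with x1 == 0) forces an immediate fault to
 * flag the bug.
 */
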
/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *  x0 = previous task_struct (must be preserved across the switch)
 *  x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)
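
/*
 * The store/load sequence above walks the cpu_context inside thread_struct
 * (x19-x28, then fp, sp and pc, in that order), with THREAD_CPU_CONTEXT
 * from asm-offsets.c giving its offset within task_struct. Only the
 * callee-saved state needs switching here, since everything else lives in
 * the pt_regs frame on each task's kernel stack.
 */
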
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	tbz	x1, #TIF_SINGLESTEP, fast_exit
	disable_dbg
	enable_step x2
fast_exit:
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	enable_dbg
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	tbz	x1, #TIF_SINGLESTEP, no_work_pending
	disable_dbg
	enable_step x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
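
/*
 * x19 and x20 arrive here via the cpu_context that copy_thread() set up:
 * for a kernel thread, x19 holds the thread function and x20 its argument;
 * for a user fork, x19 is zero and we fall straight through to ret_to_user.
 */
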
/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	disable_step x16
	isb
	enable_dbg
	enable_irq

	get_thread_info tsk
	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)
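
/*
 * For reference, a 64-bit user process reaches el0_svc (via el0_sync) with
 * something like the following; this is an illustrative sketch only, with
 * the __NR_* constant taken from the uapi headers:
 *
 *	mov	w8, #__NR_getpid	// syscall number in w8, per the ABI above
 *	svc	#0			// synchronous exception into EL1
 *	// on return, the result is in x0
 *
 * Note that adrp alone is sufficient to address sys_call_table because the
 * table is page-aligned where it is defined.
 */
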
/*
 * This is the really slow path.  We're going to be doing context
 * switches, and waiting for our parent to respond.
 */
__sys_trace:
	mov	x1, sp
	mov	w0, #0				// trace entry
	bl	syscall_trace
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x1, sp
	mov	w0, #1				// trace exit
	bl	syscall_trace
	b	ret_to_user
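
/*
 * syscall_trace() reports to the tracer (e.g. ptrace) on entry and exit;
 * on entry its return value is treated as the syscall number, which is why
 * scno is reloaded from w0 above - the tracer may have rewritten it.
 */
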
/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

ENTRY(handle_arch_irq)
	.quad	0