/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>

#include "entry-header.S"

	.align	5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending

	@ fast_restore_user_regs
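	@ lr is loaded with writeback so that sp ends up pointing at the
	@ saved pc; ldmdb with ^ then restores the user-mode r1 - lr, the
	@ mov r0, r0 is a nop filling the banked-register hazard slot that
	@ follows an ldm with ^, and movs pc, lr copies spsr_svc back into
	@ the cpsr as we return.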
	ldr	r1, [sp, #S_OFF + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #S_OFF + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
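	@ the writeback on the store below also advances sp past the
	@ fifth/sixth syscall arguments, leaving it at the pt_regs frame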
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	bl	schedule
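	@ fall through into ret_slow_syscall to recheck the work flags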
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	@ slow_restore_user_regs
	ldr	r1, [sp, #S_PSR]		@ get calling cpsr
	ldr	lr, [sp, #S_PC]!		@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall
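
/*
 * calls.S is included twice: once here with CALL() defined to count
 * the entries, which gives us NR_syscalls, and again below at
 * sys_call_table with CALL() emitting a .long pointer per entry.
 */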
	.equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	/* If we're optimising for StrongARM the resulting code won't
	   run on an ARM7 and we can save a couple of instructions.
								--pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
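	/* The word at lr - 4 was not a SWI, so we must have entered the
	   SWI vector spuriously (an ARM710 erratum): restore the caller's
	   registers and retry the instruction. */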
.Larm710bug:
	ldmia	sp, {r0 - lr}^			@ Get calling r0 - lr
	mov	r0, r0
	add	sp, sp, #S_FRAME_SIZE
	subs	pc, lr, #4
#else
#define A710(code...)
#endif

	.align	5
ENTRY(vector_swi)
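	/*
	 * Build a pt_regs frame on the SVC stack: r0 - r12, the user-mode
	 * sp and lr (stored with ^), the calling pc and cpsr, and OLD_R0
	 * so that a restarted syscall can recover its first argument.
	 */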
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
	add	r8, sp, #S_PC
	stmdb	r8, {sp, lr}^			@ Calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
	zero_fp

	/*
	 * Get the system call number.
	 */
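	/*
	 * In ARM state lr points just past the SWI instruction, so a load
	 * from lr - 4 fetches its encoding; the low 24 bits carry the old
	 * ABI syscall number.  EABI callers pass the number in r7 (scno).
	 */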

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
	ldreq	r10, [lr, #-4]			@ get SWI instruction
#else
	ldr	r10, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, r10, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)
#endif

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into
	 * scno (r7).
	 */
  A710(	ldr	ip, [lr, #-4]			@ get SWI instruction	)
  A710(	and	ip, ip, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#elif defined(CONFIG_ARM_THUMB)

	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
	ldreq	scno, [lr, #-4]

#else

	/* Legacy ABI only. */
	ldr	scno, [lr, #-4]			@ get SWI instruction
  A710(	and	ip, scno, #0x0f000000		@ check for SWI		)
  A710(	teq	ip, #0x0f000000						)
  A710(	bne	.Larm710bug						)

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	ip, __cr_alignment
	ldr	ip, [ip]
	mcr	p15, 0, ip, c1, c0		@ update control register
#endif
	enable_irq

	get_thread_info tsk
	adr	tbl, sys_call_table		@ load syscall table pointer
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
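
	/*
	 * Only four arguments fit in r0 - r3 under the C calling
	 * convention; the fifth and sixth go on the stack here, which is
	 * why sp-relative pt_regs accesses below need the S_OFF offset.
	 */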
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
	adr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
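
	/*
	 * scno is out of range: numbers in the ARM private range
	 * (__ARM_NR_BASE and up) go to arm_syscall(), anything else
	 * fails with sys_ni_syscall().
	 */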
	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	r2, scno
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r2, scno
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
	.type	__cr_alignment, #object
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as
 * sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
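@
@ sys_syscall() is the indirect syscall entry: the number arrives in r0
@ with the real arguments shifted up by one, so everything is shuffled
@ down one slot before dispatching through the table.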
	.type	sys_syscall, #function
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
	stmloia	sp, {r5, r6}			@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall

sys_fork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_fork

sys_vfork_wrapper:
	add	r0, sp, #S_OFF
	b	sys_vfork

sys_execve_wrapper:
	add	r3, sp, #S_OFF
	b	sys_execve

sys_clone_wrapper:
	add	ip, sp, #S_OFF
	str	ip, [sp, #4]
	b	sys_clone

sys_sigsuspend_wrapper:
	add	r3, sp, #S_OFF
	b	sys_sigsuspend

sys_rt_sigsuspend_wrapper:
	add	r2, sp, #S_OFF
	b	sys_rt_sigsuspend

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	b	sys_sigreturn

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	b	sys_rt_sigreturn

sys_sigaltstack_wrapper:
	ldr	r2, [sp, #S_OFF + S_SP]
	b	do_sigaltstack
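
/*
 * The native struct statfs64 is 84 bytes, but EABI alignment rules pad
 * it to 88, so accept either size from user space.
 */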
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the
 * requested offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
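	@ the 4K-unit offset must cover whole pages before it can be
	@ converted to units of PAGE_SIZE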
	tst	r5, #PGOFF_MASK
	moveq	r5, r5, lsr #PAGE_SHIFT - 12
	streq	r5, [sp, #4]
	beq	do_mmap2
	mov	r0, #-EINVAL
	mov	pc, lr
#else
	str	r5, [sp, #4]
	b	do_mmap2
#endif

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
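/*
 * EABI passes 64-bit arguments in even/odd register pairs and pads the
 * stack to match, while the old ABI does not; these stubs move the old
 * ABI arguments to where an EABI-built kernel expects them.
 */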

sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall
	.type	sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif