/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <mach/entry-macro.S>
#include <asm/unwind.h>

#include "entry-header.S"

        .align  5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        disable_irq                             @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending

        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        @ fast_restore_user_regs
        ldr     r1, [sp, #S_OFF + S_PSR]        @ get calling cpsr
        ldr     lr, [sp, #S_OFF + S_PC]!        @ get pc
        msr     spsr_cxsf, r1                   @ save in spsr_svc
        ldmdb   sp, {r1 - lr}^                  @ get calling r1 - lr
        mov     r0, r0
        add     sp, sp, #S_FRAME_SIZE - S_PC
        movs    pc, lr                          @ return & move spsr_svc into cpsr
 UNWIND(.fnend          )

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
        str     r0, [sp, #S_R0+S_OFF]!          @ returned r0
work_pending:
        tst     r1, #_TIF_NEED_RESCHED
        bne     work_resched
        tst     r1, #_TIF_SIGPENDING
        beq     no_work_pending
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
        bl      do_notify_resume
        b       ret_slow_syscall                @ Check work again

work_resched:
        bl      schedule
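        @ work_resched falls straight through into ret_slow_syscall below,
        @ which re-reads TI_FLAGS and loops back to work_pending if more
        @ work became pending while we were scheduled out.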
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
        disable_irq                             @ disable interrupts
        ldr     r1, [tsk, #TI_FLAGS]
        tst     r1, #_TIF_WORK_MASK
        bne     work_pending
no_work_pending:
        /* perform architecture specific actions before user return */
        arch_ret_to_user r1, lr

        @ slow_restore_user_regs
        ldr     r1, [sp, #S_PSR]                @ get calling cpsr
        ldr     lr, [sp, #S_PC]!                @ get pc
        msr     spsr_cxsf, r1                   @ save in spsr_svc
        ldmdb   sp, {r0 - lr}^                  @ get calling r0 - lr
        mov     r0, r0
        add     sp, sp, #S_FRAME_SIZE - S_PC
        movs    pc, lr                          @ return & move spsr_svc into cpsr
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
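@ schedule_tail() finishes the bookkeeping for the context switch into the
@ new child.  If syscall tracing is active, the fork is reported as a
@ syscall-exit event before taking the normal slow return path; why=1 marks
@ this as a real syscall return.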
ENTRY(ret_from_fork)
        bl      schedule_tail
        get_thread_info tsk
        ldr     r1, [tsk, #TI_FLAGS]            @ check for syscall tracing
        mov     why, #1
        tst     r1, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
        beq     ret_slow_syscall
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        bl      syscall_trace
        b       ret_slow_syscall
ENDPROC(ret_from_fork)

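@ calls.S is included twice: this first pass defines CALL(x) so that each
@ entry merely bumps NR_syscalls, giving the syscall count as an
@ assemble-time constant; CALL(x) is then redefined to emit a .long so the
@ second include (under sys_call_table below) lays out the actual table.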
        .equ NR_syscalls,0
#define CALL(x) .equ NR_syscalls,NR_syscalls+1
#include "calls.S"
#undef CALL
#define CALL(x) .long x

#ifdef CONFIG_FUNCTION_TRACER
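@ Functions built with -pg call mcount on entry.  With CONFIG_DYNAMIC_FTRACE
@ the "bl ftrace_stub" at mcount_call/ftrace_call below is the site that
@ ftrace rewrites at run time to call into the tracer; without it, mcount
@ compares ftrace_trace_function against ftrace_stub on every call to see
@ whether a tracer has been registered.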
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
        stmdb   sp!, {r0-r3, lr}
        mov     r0, lr
        sub     r0, r0, #MCOUNT_INSN_SIZE

        .globl mcount_call
mcount_call:
        bl      ftrace_stub
        ldr     lr, [fp, #-4]                   @ restore lr
        ldmia   sp!, {r0-r3, pc}

ENTRY(ftrace_caller)
        stmdb   sp!, {r0-r3, lr}
        ldr     r1, [fp, #-4]
        mov     r0, lr
        sub     r0, r0, #MCOUNT_INSN_SIZE

        .globl ftrace_call
ftrace_call:
        bl      ftrace_stub
        ldr     lr, [fp, #-4]                   @ restore lr
        ldmia   sp!, {r0-r3, pc}

#else

ENTRY(mcount)
        stmdb   sp!, {r0-r3, lr}
        ldr     r0, =ftrace_trace_function
        ldr     r2, [r0]
        adr     r0, ftrace_stub
        cmp     r0, r2
        bne     trace
        ldr     lr, [fp, #-4]                   @ restore lr
        ldmia   sp!, {r0-r3, pc}

trace:
        ldr     r1, [fp, #-4]                   @ lr of instrumented routine
        mov     r0, lr
        sub     r0, r0, #MCOUNT_INSN_SIZE
        mov     lr, pc
        mov     pc, r2
        mov     lr, r1                          @ restore lr
        ldmia   sp!, {r0-r3, pc}

#endif /* CONFIG_DYNAMIC_FTRACE */

        .globl ftrace_stub
ftrace_stub:
        mov     pc, lr

#endif /* CONFIG_FUNCTION_TRACER */

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

        /* If we're optimising for StrongARM the resulting code won't
           run on an ARM7 and we can save a couple of instructions.
                                                                --pb */
#ifdef CONFIG_CPU_ARM710
#define A710(code...) code
.Larm710bug:
        ldmia   sp, {r0 - lr}^                  @ Get calling r0 - lr
        mov     r0, r0
        add     sp, sp, #S_FRAME_SIZE
        subs    pc, lr, #4
#else
#define A710(code...)
#endif

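@ vector_swi: entered directly from the SWI/SVC vector.  It builds a full
@ pt_regs frame on the SVC stack (r0-r12, user sp/lr, pc, cpsr, old r0),
@ works out the syscall number according to the configured ABI(s), and
@ dispatches through the syscall table with r4/r5 pushed as the fifth and
@ sixth arguments.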
        .align  5
ENTRY(vector_swi)
        sub     sp, sp, #S_FRAME_SIZE
        stmia   sp, {r0 - r12}                  @ Calling r0 - r12
        add     r8, sp, #S_PC
        stmdb   r8, {sp, lr}^                   @ Calling sp, lr
        mrs     r8, spsr                        @ called from non-FIQ mode, so ok.
        str     lr, [sp, #S_PC]                 @ Save calling PC
        str     r8, [sp, #S_PSR]                @ Save CPSR
        str     r0, [sp, #S_OLD_R0]             @ Save OLD_R0
        zero_fp

        /*
         * Get the system call number.
         */

#if defined(CONFIG_OABI_COMPAT)

        /*
         * If we have CONFIG_OABI_COMPAT then we need to look at the swi
         * value to determine if it is an EABI or an old ABI call.
         */
#ifdef CONFIG_ARM_THUMB
        tst     r8, #PSR_T_BIT
        movne   r10, #0                         @ no thumb OABI emulation
        ldreq   r10, [lr, #-4]                  @ get SWI instruction
#else
        ldr     r10, [lr, #-4]                  @ get SWI instruction
  A710( and     ip, r10, #0x0f000000            @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )
#endif
#ifdef CONFIG_CPU_ENDIAN_BE8
        rev     r10, r10                        @ little endian instruction
#endif

#elif defined(CONFIG_AEABI)

        /*
         * Pure EABI user space always puts the syscall number into scno (r7).
         */
  A710( ldr     ip, [lr, #-4]                   @ get SWI instruction   )
  A710( and     ip, ip, #0x0f000000             @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )

#elif defined(CONFIG_ARM_THUMB)

        /* Legacy ABI only, possibly thumb mode. */
        tst     r8, #PSR_T_BIT                  @ this is SPSR from save_user_regs
        addne   scno, r7, #__NR_SYSCALL_BASE    @ put OS number in
        ldreq   scno, [lr, #-4]

#else

        /* Legacy ABI only. */
        ldr     scno, [lr, #-4]                 @ get SWI instruction
  A710( and     ip, scno, #0x0f000000           @ check for SWI         )
  A710( teq     ip, #0x0f000000                                         )
  A710( bne     .Larm710bug                                             )

#endif

#ifdef CONFIG_ALIGNMENT_TRAP
        ldr     ip, __cr_alignment
        ldr     ip, [ip]
        mcr     p15, 0, ip, c1, c0              @ update control register
#endif
        enable_irq

        get_thread_info tsk
        adr     tbl, sys_call_table             @ load syscall table pointer
        ldr     ip, [tsk, #TI_FLAGS]            @ check for syscall tracing

#if defined(CONFIG_OABI_COMPAT)
        /*
         * If the swi argument is zero, this is an EABI call and we do nothing.
         *
         * If this is an old ABI call, get the syscall number into scno and
         * get the old ABI syscall table address.
         */
        bics    r10, r10, #0xff000000
        eorne   scno, r10, #__NR_OABI_SYSCALL_BASE
        ldrne   tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
        bic     scno, scno, #0xff000000         @ mask off SWI op-code
        eor     scno, scno, #__NR_SYSCALL_BASE  @ check OS number
#endif

        stmdb   sp!, {r4, r5}                   @ push fifth and sixth args
        tst     ip, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
        bne     __sys_trace

        cmp     scno, #NR_syscalls              @ check upper syscall limit
        adr     lr, ret_fast_syscall            @ return address
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
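        @ scno >= NR_syscalls: either an ARM-private call (__ARM_NR_*),
        @ handled by arm_syscall(), or an unknown number, which ends up in
        @ sys_ni_syscall().  Either way "why" is cleared so the return path
        @ no longer treats this as a real syscall.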
        add     r1, sp, #S_OFF
2:      mov     why, #0                         @ no longer a real syscall
        cmp     scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
        eor     r0, scno, #__NR_SYSCALL_BASE    @ put OS number back
        bcs     arm_syscall
        b       sys_ni_syscall                  @ not private func
ENDPROC(vector_swi)

        /*
         * This is the really slow path.  We're going to be doing
         * context switches, and waiting for our parent to respond.
         */
__sys_trace:
        mov     r2, scno
        add     r1, sp, #S_OFF
        mov     r0, #0                          @ trace entry [IP = 0]
        bl      syscall_trace

        adr     lr, __sys_trace_return          @ return address
        mov     scno, r0                        @ syscall number (possibly new)
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        cmp     scno, #NR_syscalls              @ check upper syscall limit
        ldmccia r1, {r0 - r3}                   @ have to reload r0 - r3
        ldrcc   pc, [tbl, scno, lsl #2]         @ call sys_* routine
        b       2b

__sys_trace_return:
        str     r0, [sp, #S_R0 + S_OFF]!        @ save returned r0
        mov     r2, scno
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        bl      syscall_trace
        b       ret_slow_syscall

        .align  5
#ifdef CONFIG_ALIGNMENT_TRAP
        .type   __cr_alignment, #object
__cr_alignment:
        .word   cr_alignment
#endif
        .ltorg

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
#define ABI(native, compat) native
#ifdef CONFIG_AEABI
#define OBSOLETE(syscall) sys_ni_syscall
#else
#define OBSOLETE(syscall) syscall
#endif

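@ ABI() selects the native entry for this table; sys_oabi_call_table further
@ down redefines it to pick the compat entry instead, so both tables are
@ generated from the same calls.S.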
        .type   sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
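@ sys_syscall implements the indirect syscall(2) entry point: r0 carries the
@ real syscall number, so the remaining arguments are shifted down by one
@ register and r5/r6 are stored into the fifth/sixth argument slots pushed
@ earlier, before dispatching through the table.  Out-of-range numbers and a
@ nested syscall() call fall through to sys_ni_syscall.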
sys_syscall:
        bic     scno, r0, #__NR_OABI_SYSCALL_BASE
        cmp     scno, #__NR_syscall - __NR_SYSCALL_BASE
        cmpne   scno, #NR_syscalls              @ check range
        stmloia sp, {r5, r6}                    @ shuffle args
        movlo   r0, r1
        movlo   r1, r2
        movlo   r2, r3
        movlo   r3, r4
        ldrlo   pc, [tbl, scno, lsl #2]
        b       sys_ni_syscall
ENDPROC(sys_syscall)

sys_fork_wrapper:
        add     r0, sp, #S_OFF
        b       sys_fork
ENDPROC(sys_fork_wrapper)

sys_vfork_wrapper:
        add     r0, sp, #S_OFF
        b       sys_vfork
ENDPROC(sys_vfork_wrapper)

sys_execve_wrapper:
        add     r3, sp, #S_OFF
        b       sys_execve
ENDPROC(sys_execve_wrapper)

sys_clone_wrapper:
        add     ip, sp, #S_OFF
        str     ip, [sp, #4]
        b       sys_clone
ENDPROC(sys_clone_wrapper)

sys_sigsuspend_wrapper:
        add     r3, sp, #S_OFF
        b       sys_sigsuspend
ENDPROC(sys_sigsuspend_wrapper)

sys_rt_sigsuspend_wrapper:
        add     r2, sp, #S_OFF
        b       sys_rt_sigsuspend
ENDPROC(sys_rt_sigsuspend_wrapper)

sys_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        b       sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
        add     r0, sp, #S_OFF
        b       sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

sys_sigaltstack_wrapper:
        ldr     r2, [sp, #S_OFF + S_SP]
        b       do_sigaltstack
ENDPROC(sys_sigaltstack_wrapper)

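@ The statfs64/fstatfs64 wrappers fix up the size argument: user space may
@ pass sizeof(struct statfs64) as 88 because of structure padding differences,
@ while the kernel's layout is 84 bytes; without this fixup the generic
@ routines would reject the call with -EINVAL.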
sys_statfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
        teq     r1, #88
        moveq   r1, #84
        b       sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always in units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
#if PAGE_SHIFT > 12
        tst     r5, #PGOFF_MASK
        moveq   r5, r5, lsr #PAGE_SHIFT - 12
        streq   r5, [sp, #4]
        beq     do_mmap2
        mov     r0, #-EINVAL
        mov     pc, lr
#else
        str     r5, [sp, #4]
        b       do_mmap2
#endif
ENDPROC(sys_mmap2)

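@ Prefetch-abort helpers: pabort_ifar reads the Instruction Fault Address
@ Register (CP15 c6, c0, 2) into r0 and falls through; pabort_noifar is used
@ on CPUs without an IFAR and simply returns.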
ENTRY(pabort_ifar)
        mrc     p15, 0, r0, cr6, cr0, 2
ENTRY(pabort_noifar)
        mov     pc, lr
ENDPROC(pabort_ifar)
ENDPROC(pabort_noifar)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */
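@ EABI passes 64-bit arguments in aligned register pairs (padding a register
@ where needed); old-ABI callers do not, so these wrappers move the arguments
@ into the positions the EABI-built sys_* routines expect.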
sys_oabi_pread64:
        stmia   sp, {r3, r4}
        b       sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
        stmia   sp, {r3, r4}
        b       sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
        mov     r3, r2
        mov     r2, r1
        b       sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
        str     r3, [sp]
        mov     r3, r2
        mov     r2, r1
        b       sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
#define ABI(native, compat) compat
#define OBSOLETE(syscall) syscall
        .type   sys_oabi_call_table, #object
ENTRY(sys_oabi_call_table)
#include "calls.S"
#undef ABI
#undef OBSOLETE

#endif