entry_32.S
/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - %oldesp
 *	3C(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
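/*
 * For orientation: the layout above matches, slot for slot, the 32-bit
 * struct pt_regs of this era.  A minimal C sketch, assuming the
 * <asm/ptrace.h> definition (field names varied slightly across
 * releases):
 *
 *	struct pt_regs {
 *		long ebx, ecx, edx, esi, edi, ebp, eax;
 *		int  xds, xes, xfs;
 *		long orig_eax, eip;
 *		int  xcs;
 *		long eflags, esp;
 *		int  xss;
 *	};
 *
 * The PT_* offsets used throughout this file (PT_EAX, PT_EIP, ...) are
 * generated from that structure by asm-offsets.c.
 */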
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include "irq_vectors.h"
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSCALL_RET (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_nocheck
#endif
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif
#define SAVE_ALL \
	cld; \
	pushl %fs; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET fs, 0;*/\
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	movl $(__KERNEL_PERCPU), %edx; \
	movl %edx, %fs
#define RESTORE_INT_REGS \
	popl %ebx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax
#define RESTORE_REGS \
	RESTORE_INT_REGS; \
1:	popl %ds; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
3:	popl %fs; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE fs;*/\
.pushsection .fixup,"ax"; \
4:	movl $0,(%esp); \
	jmp 1b; \
5:	movl $0,(%esp); \
	jmp 2b; \
6:	movl $0,(%esp); \
	jmp 3b; \
.section __ex_table,"a";\
	.align 4; \
	.long 1b,4b; \
	.long 2b,5b; \
	.long 3b,6b; \
.popsection
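/*
 * Why RESTORE_REGS needs the fixups above: popping %ds/%es/%fs can
 * fault if a bad selector was left in the saved slot (e.g. by user
 * space).  Each faulting pop (labels 1-3) has an __ex_table entry
 * pointing at fixup code (labels 4-6) that overwrites the offending
 * value on the stack with 0 - the null selector always loads - and
 * retries the pop.
 */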
#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)
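/*
 * A note on ret_from_fork: every new task makes its first pass through
 * here; copy_thread() points the child's saved kernel %eip at this
 * label, and schedule_tail() finishes the context switch begun by the
 * parent.  The $0x0202 pushed above is X86_EFLAGS_IF plus the
 * always-set reserved bit 1 - a known-good EFLAGS value for the child.
 */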
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol. */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up to call TRACE_IRQS_OFF -
	 * and at that point we immediately re-enable interrupts anyway.
	 */
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	orl $X86_EFLAGS_IF, (%esp)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary: 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
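	/*
	 * Worked out: after the four pushes above, %esp = sp0 - 4*4.
	 * copy_thread() sets sp0 = top_of_stack - 8, and thread_info
	 * sits at top_of_stack - THREAD_SIZE, so
	 *
	 *   &current_thread_info()->sysenter_return
	 *	= thread_info + TI_sysenter_return
	 *	= %esp + 4*4 + 8 - THREAD_SIZE + TI_sysenter_return
	 *
	 * which is exactly the operand of the pushl below.
	 */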
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)
	/*
	 * Load the potential sixth argument from the user stack.
	 * Careful about security: %ebp is an untrusted user pointer.  The
	 * range check below rejects any pointer whose 4-byte load could
	 * reach up to __PAGE_OFFSET (hence the -3), and the __ex_table
	 * entry catches a load that faults anyway.
	 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
	/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSCALL_RET
	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
ENDPROC(ia32_sysenter_target)
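/*
 * For reference, a sketch of the user-mode half (the actual code lives
 * in the vsyscall page, not in this file): it saves the registers that
 * sysexit will clobber and parks the user stack pointer in %ebp before
 * entering the kernel:
 *
 *	push %ecx; push %edx; push %ebp
 *	movl %esp, %ebp
 *	sysenter
 *
 * That is why %ebp carries the user %esp on entry above, and why a
 * sixth syscall argument has to be fetched from user memory.
 */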
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	testl $X86_EFLAGS_TF,PT_EFLAGS(%esp)	# if tracing, set the
	jz no_singlestep			# singlestep flag on exit
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous
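/*
 * Note on the __ex_table entry above: if "iret" itself faults (e.g.
 * because user space left invalid segment state behind), the fault
 * arrives with a kernel %eip and is redirected to iret_exc, which
 * enters the common error_code path with do_iret_error as the handler,
 * so the offending task typically gets a signal instead of the kernel
 * oopsing.
 */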
	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32-bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16 bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

	/*
	 * If returning to userspace with a 16-bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy.
	 */
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS(CLBR_EAX)
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)
	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)
	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl PT_ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
#define FIXUP_ESPFIX_STACK \
	/* since we are on the wrong stack, we can't do this in C :( */ \
	PER_CPU(gdt_page, %ebx); \
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
	addl %esp, %eax; \
	pushl $__KERNEL_DS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	lss (%esp), %esp; \
	CFI_ADJUST_CFA_OFFSET -8;

#define UNWIND_ESPFIX_STACK \
	movl %ss, %eax; \
	/* see if on espfix stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 27f; \
	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to normal stack */ \
	FIXUP_ESPFIX_STACK; \
27:;
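/*
 * Background on the two macros above: an iret to a 16-bit %ss restores
 * only the low word of %esp, so tasks on such stacks are returned to
 * via a special GDT segment (__ESPFIX_SS) whose base supplies the lost
 * high bits.  FIXUP_ESPFIX_STACK undoes that: GET_DESC_BASE assembles
 * the segment base from the descriptor's scattered base fields, the
 * addl adds the %esp offset to get the flat address, and lss switches
 * back to the normal kernel stack.
 */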
/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.section .rodata,"a"
ENTRY(interrupt)
.text

ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=0
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
 .previous
	.long 1b
 .text
vector=vector+1
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
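/*
 * Each stub generated by the .rept loop above expands to just
 * (illustrative):
 *
 *	1:	pushl $~(vector)
 *		jmp common_interrupt
 *
 * plus a pointer to it in the interrupt[] table in .rodata.  The vector
 * is pushed one's-complemented, so the value landing in orig_eax can
 * never be mistaken for a (non-negative) system call number; do_IRQ
 * undoes the ~ to recover the vector.
 */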
/*
 * The CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_##name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %fs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_FS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov %ecx, PT_FS(%esp)
	/*CFI_REL_OFFSET fs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)
ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop(CLBR_ANY)
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC
END(device_not_available)
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_sp0+offset(%esp),%esp;	\
	CFI_DEF_CFA esp, 0;			\
	CFI_UNDEFINED eip;			\
	pushfl;					\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $__KERNEL_CS;			\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $sysenter_past_esp;		\
	CFI_ADJUST_CFA_OFFSET 4;		\
	CFI_REL_OFFSET eip, 0

KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)
/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
KPROBE_ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_espfix_stack:
	/*
	 * We have a RING0_INT_FRAME here.
	 *
	 * Create a pointer we can use to lss back to the espfix stack.
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
KPROBE_END(nmi)
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_syscall_ret)
	sti
	sysexit
END(native_irq_enable_syscall_ret)
#endif
KPROBE_ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
KPROBE_END(general_protection)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)
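/*
 * The contract for this helper (a sketch; see kernel_thread() in
 * process_32.c): kernel_thread() forges a pt_regs with %ebx = the
 * thread function, %edx = its argument and %eip = kernel_thread_helper,
 * so the new task "returns" from ret_from_fork straight into the code
 * above, calls the function, and hands its return value to do_exit().
 */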
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entry point expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp			/* remove xen-provided frame */
	jmp sysenter_past_esp

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've re-enabled
	 * events and checked for pending events.  This simulates
	 * the iret instruction's behaviour, where it delivers a
	 * pending interrupt when enabling interrupts.
	 */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb 1f
	cmpl $xen_iret_end_crit,%eax
	jae 1f
	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

	# Hypervisor uses this for application faults while it executes.
	# We get here for two reasons:
	#  1. Fault while reloading DS, ES, FS or GS
	#  2. Fault while executing IRET
	# Category 1 we fix up by reattempting the load, and zeroing the segment
	# register if the load fails.
	# Category 2 we fix up by jumping to do_iret_error.  We cannot use the
	# normal Linux return path in this case, because if we use the IRET
	# hypercall to pop the stack frame we end up in an infinite loop of
	# failsafe callbacks.
	# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc			# EAX != 0 => Category 2 (Bad IRET)
5:	pushl $0			# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)
#endif	/* CONFIG_XEN */
#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax

.globl mcount_call
mcount_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax

	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace

.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */
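/*
 * mcount calling convention, for reference: "gcc -pg" inserts a
 * "call mcount" at the top of each function, after the usual
 * "push %ebp; mov %esp,%ebp" prologue.  So once the three scratch
 * registers are pushed above, 0xc(%esp) is the return address inside
 * the traced function and 0x4(%ebp) is the traced function's caller -
 * the two addresses handed to the tracer.
 */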
.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)