/*
 *  linux/arch/i386/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %gs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - %oldesp
 *	3C(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include "irq_vectors.h"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)

CF_MASK		= 0x00000001
TF_MASK		= 0x00000100
IF_MASK		= 0x00000200
DF_MASK		= 0x00000400
NT_MASK		= 0x00004000
VM_MASK		= 0x00020000
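
/*
 * These constants are EFLAGS bit masks: CF is bit 0, TF (trap flag,
 * single-step) bit 8, IF (interrupt enable) bit 9, DF (direction) bit 10,
 * NT (nested task) bit 14 and VM (virtual-8086 mode) bit 17.  For example,
 * "testl $IF_MASK,PT_EFLAGS(%esp)" below checks whether the interrupted
 * context had interrupts enabled.
 */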

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

#define SAVE_ALL \
	cld; \
	pushl %gs; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET gs, 0;*/\
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	movl $(__KERNEL_PDA), %edx; \
	movl %edx, %gs
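
/*
 * SAVE_ALL completes the pt_regs layout shown at the top of this file:
 * the CPU/stub already pushed ss/esp/eflags/cs/eip and orig_eax, and the
 * macro adds the segment and general-purpose registers.  As a rough C
 * sketch (field names per this tree's <asm/ptrace.h>; a sketch, not a
 * literal copy of that header):
 *
 *	struct pt_regs {
 *		long ebx, ecx, edx, esi, edi, ebp, eax;
 *		int  xds, xes, xgs;
 *		long orig_eax, eip;
 *		int  xcs;
 *		long eflags, esp;
 *		int  xss;
 *	};
 *
 * It also reloads %ds/%es with __USER_DS and %gs with the per-cpu PDA
 * segment so that kernel code runs with a known segment state.
 */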

#define RESTORE_INT_REGS \
	popl %ebx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax

#define RESTORE_REGS \
	RESTORE_INT_REGS; \
1:	popl %ds; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
3:	popl %gs; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE gs;*/\
.pushsection .fixup,"ax"; \
4:	movl $0,(%esp); \
	jmp 1b; \
5:	movl $0,(%esp); \
	jmp 2b; \
6:	movl $0,(%esp); \
	jmp 3b; \
.section __ex_table,"a";\
	.align 4; \
	.long 1b,4b; \
	.long 2b,5b; \
	.long 3b,6b; \
.popsection
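
/*
 * The .fixup/__ex_table pairs above implement exception recovery: each
 * __ex_table entry is (faulting address, fixup address).  If popping a
 * segment register faults (e.g. on a stale selector), the fault handler
 * finds the entry for label 1/2/3 and resumes at 4/5/6, which replaces
 * the bad selector on the stack with 0 and retries the pop.
 */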

#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4

#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
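
/*
 * The RING0_* frame macros emit only DWARF CFI annotations for the
 * unwinder; they generate no instructions.  RING0_INT_FRAME describes
 * the 3-word eflags/cs/eip frame the CPU pushes for a same-privilege
 * trap, RING0_EC_FRAME the 4-word variant with an error code, and
 * RING0_PTREGS_FRAME a full struct pt_regs already on the stack.
 */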

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
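
/*
 * 0x0202 = IF (bit 9) plus the always-set reserved bit 1 of EFLAGS,
 * i.e. a clean kernel flags word with interrupts enabled, so the child
 * starts with no stale TF/DF/NT state inherited from the fork path.
 */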

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif
	CFI_ENDPROC
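
/*
 * The IF test in the need_resched loop above guards preemption on the
 * exception path: if the interrupted kernel context had interrupts
 * disabled, it was inside a critical region and preempt_schedule_irq()
 * (which re-enables interrupts around schedule()) must not run.
 */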

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	/*
	 * No need to follow this irqs on/off section: the syscall
	 * disabled irqs and here we enable them straight after entry:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
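
	/*
	 * Worked out (assuming the esp0 convention mentioned above): esp0
	 * points 8 bytes below the top of the thread stack, and the four
	 * pushes above moved %esp another 16 bytes down, so
	 * thread_info == %esp + 4*4 + 8 - THREAD_SIZE; the operand above
	 * is that base plus TI_sysenter_return.
	 */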

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
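
	/*
	 * The bound check above accounts for the 4-byte load: the highest
	 * legal %ebp is __PAGE_OFFSET-4 (so all four bytes stay below
	 * __PAGE_OFFSET), hence "jae syscall_fault" against
	 * __PAGE_OFFSET-3.  The __ex_table entry covers the case where
	 * the user page is simply not mapped.
	 */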

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_GS(%esp), %gs
	ENABLE_INTERRUPTS_SYSEXIT
	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_GS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
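
/*
 * SYSEXIT returns to ring 3 with %eip taken from %edx and %esp from
 * %ecx (CS/SS come from the SYSENTER MSRs), which is why the fast path
 * above loads PT_EIP into %edx and PT_OLDESP into %ecx before
 * ENABLE_INTERRUPTS_SYSEXIT.
 */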

	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	testl $TF_MASK,PT_EFLAGS(%esp)
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
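
/*
 * After the two movb's above, %eax holds CS in bits 0-7, SS in bits
 * 8-15 and the interrupted EFLAGS in the high bits.  The andl/cmpl pair
 * therefore matches exactly one case: a return to user mode (RPL ==
 * USER_RPL), not to vm86 (VM clear), with a stack selector in the LDT.
 * That is the only case where the espfix fixup below is needed.
 */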

restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
1:	INTERRUPT_RETURN
.section .fixup,"ax"
iret_exc:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, paravirt_ops+PARAVIRT_enabled
	jne restore_nocheck
#endif

	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS(CLBR_EAX)
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
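
/*
 * "lss (%esp), %esp" above loads SS:ESP as a pair from the two words
 * just pushed, switching to the patched __ESPFIX_SS segment in a single
 * instruction so there is no window with a mismatched stack.
 */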

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $VM_MASK, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl PT_ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
	CFI_ENDPROC

#define FIXUP_ESPFIX_STACK \
	/* since we are on the wrong stack, we can't write this in C :( */ \
	movl %gs:PDA_cpu, %ebx; \
	PER_CPU(cpu_gdt_descr, %ebx); \
	movl GDS_address(%ebx), %ebx; \
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
	addl %esp, %eax; \
	pushl $__KERNEL_DS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	lss (%esp), %esp; \
	CFI_ADJUST_CFA_OFFSET -8;
#define UNWIND_ESPFIX_STACK \
	movl %ss, %eax; \
	/* see if on espfix stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 27f; \
	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to normal stack */ \
	FIXUP_ESPFIX_STACK; \
27:;
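
/*
 * FIXUP_ESPFIX_STACK recovers a flat stack pointer: GET_DESC_BASE
 * extracts the base of this CPU's espfix GDT entry, adding %esp to it
 * yields the linear address we are really running at, and the final
 * lss switches to that address under the flat __KERNEL_DS selector.
 */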

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
	RING0_INT_FRAME
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
 .data
	.long 1b
 .text
vector=vector+1
.endr
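
/*
 * Each stub pushes $~(vector), i.e. the one's complement of its vector
 * number.  The saved orig_eax is therefore always negative and can
 * never be mistaken for a (non-negative) syscall number; do_IRQ
 * recovers the vector by complementing it again.
 */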

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
	CFI_ENDPROC

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %gs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET gs, 0*/
	movl $(__KERNEL_PDA), %ecx
	movl %ecx, %gs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov  %ecx, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)
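
/*
 * Each trap stub below pushes an error code (or 0) and its C handler's
 * address, then jumps to error_code, which saves the remaining
 * registers and calls the handler.  Roughly (a sketch of the calling
 * convention used here, with regs in %eax and the error code in %edx):
 *
 *	fastcall void do_xxx(struct pt_regs *regs, long error_code);
 */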

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop(CLBR_ANY)
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
	CFI_DEF_CFA esp, 0;			\
	CFI_UNDEFINED eip;			\
	pushfl;					\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $__KERNEL_CS;			\
	CFI_ADJUST_CFA_OFFSET 4;		\
	pushl $sysenter_past_esp;		\
	CFI_ADJUST_CFA_OFFSET 4;		\
	CFI_REL_OFFSET eip, 0
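
/*
 * FIX_STACK's "offset" compensates for the words pushed onto the tiny
 * sysenter stack since the sysenter instruction: 12 when a single
 * 3-word exception frame (eflags/cs/eip) sits on it, 24 when an NMI's
 * frame landed on top of a debug fault's frame (see the NMI path
 * below).
 */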

KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
KPROBE_ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $sysenter_entry,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct
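
/*
 * The range check above (saved eip between $debug and
 * $debug_esp_fix_insn) detects an NMI that interrupted the debug
 * handler before it finished its own sysenter stack fixup; in that
 * case both the debug frame and the NMI frame sit on the sysenter
 * stack, hence FIX_STACK's offset of 24 instead of 12.
 */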

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
1:	INTERRUPT_RETURN
	CFI_ENDPROC
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous
KPROBE_END(nmi)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
#endif
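
/*
 * Note that the iret above can itself fault (e.g. on a bad segment in
 * the return frame); the __ex_table entry diverts such a fault to
 * iret_exc, which reports the error via do_iret_error.
 */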

KPROBE_ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

KPROBE_ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
KPROBE_END(general_protection)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC

ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)
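
/*
 * kernel_thread() points a new thread here with the function in %ebx
 * and its argument in %edx.  Roughly equivalent C (a sketch of the
 * effect, not a literal translation):
 *
 *	do_exit(fn(arg));
 *
 * The argument is placed both in %eax and on the stack above so the
 * callee finds it whether or not it was built with regparm.
 */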

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)