entry_32.S
/*
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - %oldesp
 *	3C(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
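/*
 * The PT_* constants used throughout this file (PT_EBX, PT_EIP, PT_OLDESP,
 * ...) are generated from struct pt_regs by asm-offsets.c; the table above
 * is that same layout written out by hand, so the two must stay in sync.
 */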
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include "irq_vectors.h"
/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSCALL_RET (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
#define nr_syscalls ((syscall_table_size)/4)
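/*
 * syscall_table_size is computed at the bottom of this file from the table
 * defined in syscall_table_32.S; each entry is a 4-byte function pointer,
 * so dividing by 4 yields the number of system calls.
 */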
#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm
#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif
#define SAVE_ALL \
	cld; \
	pushl %fs; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET fs, 0;*/\
	pushl %es; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET es, 0;*/\
	pushl %ds; \
	CFI_ADJUST_CFA_OFFSET 4;\
	/*CFI_REL_OFFSET ds, 0;*/\
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET eax, 0;\
	pushl %ebp; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebp, 0;\
	pushl %edi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edi, 0;\
	pushl %esi; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET esi, 0;\
	pushl %edx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET edx, 0;\
	pushl %ecx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ecx, 0;\
	pushl %ebx; \
	CFI_ADJUST_CFA_OFFSET 4;\
	CFI_REL_OFFSET ebx, 0;\
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	movl $(__KERNEL_PERCPU), %edx; \
	movl %edx, %fs
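/*
 * Together with the hardware-pushed frame and the orig_eax word pushed by
 * each entry point, the pushes in SAVE_ALL complete the pt_regs layout
 * documented at the top of this file: registers are pushed in the reverse
 * of the order listed, so the last push, %ebx, lands at 0(%esp). SAVE_ALL
 * also reloads %ds/%es with the user data segment and %fs with the per-CPU
 * segment, since the entry may have come from a context whose selectors
 * differ (v8086 mode, an LDT segment, or plain userspace).
 */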
#define RESTORE_INT_REGS \
	popl %ebx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebx;\
	popl %ecx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ecx;\
	popl %edx; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edx;\
	popl %esi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE esi;\
	popl %edi; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE edi;\
	popl %ebp; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE ebp;\
	popl %eax; \
	CFI_ADJUST_CFA_OFFSET -4;\
	CFI_RESTORE eax
#define RESTORE_REGS \
	RESTORE_INT_REGS; \
1:	popl %ds; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE ds;*/\
2:	popl %es; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE es;*/\
3:	popl %fs; \
	CFI_ADJUST_CFA_OFFSET -4;\
	/*CFI_RESTORE fs;*/\
.pushsection .fixup,"ax"; \
4:	movl $0,(%esp); \
	jmp 1b; \
5:	movl $0,(%esp); \
	jmp 2b; \
6:	movl $0,(%esp); \
	jmp 3b; \
.section __ex_table,"a";\
	.align 4; \
	.long 1b,4b; \
	.long 2b,5b; \
	.long 3b,6b; \
.popsection
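/*
 * The 4/5/6 fixup labels above rely on the kernel exception-table
 * mechanism: if popping a segment register faults (the saved selector may
 * no longer be valid by the time we return), the fault handler finds the
 * faulting address in __ex_table and jumps to the fixup, which overwrites
 * the saved selector with 0 (the null selector) and retries the pop.
 */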
#define RING0_INT_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 3*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4
#define RING0_EC_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, 4*4;\
	/*CFI_OFFSET cs, -2*4;*/\
	CFI_OFFSET eip, -3*4
#define RING0_PTREGS_FRAME \
	CFI_STARTPROC simple;\
	CFI_SIGNAL_FRAME;\
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
	CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
	CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
	CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
	CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
	CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)
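/*
 * On entry %eax holds the previous task pointer handed back by the context
 * switch; it is pushed as the argument to schedule_tail(), which finishes
 * the switch. The new child's syscall return value (0) was already stored
 * in the PT_EAX slot by copy_thread(), which is why this path can simply
 * fall through to syscall_exit.
 */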
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace
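/*
 * The combined test above works because %al holds the low byte of the
 * saved CS (its RPL bits) while the VM flag sits higher in %eax: a vm86
 * return leaves X86_EFLAGS_VM set, giving a value above USER_RPL, and a
 * user return has RPL 3 == USER_RPL, so both fall through; only a plain
 * kernel return (RPL 0) takes the resume_kernel branch.
 */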
ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * we have enough kernel state to call TRACE_IRQS_OFF - and we
	 * immediately enable interrupts again at that point anyway.
	 */
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	orl $X86_EFLAGS_IF, (%esp)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)
	/*
	 * Load the potential sixth argument from the user stack.
	 * Careful about security: %ebp is user-controlled, so the compare
	 * against __PAGE_OFFSET-3 keeps the 4-byte read inside the user
	 * address range, and the exception table entry below recovers if
	 * the page is unmapped.
	 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSCALL_RET
	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
ENDPROC(ia32_sysenter_target)
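/*
 * The sysexit convention mirrors the loads above: the CPU takes the return
 * EIP from %edx and the user ESP from %ecx, which is why PT_EIP and
 * PT_OLDESP are loaded into those registers before
 * ENABLE_INTERRUPTS_SYSCALL_RET ("sti; sysexit"), and why any code that
 * modifies the saved registers must also disable the sysexit fast path.
 */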
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	testl $X86_EFLAGS_TF,PT_EFLAGS(%esp)	# If tracing set singlestep flag on exit
	jz no_singlestep
	orl $_TIF_SINGLESTEP,TI_flags(%ebp)
no_singlestep:
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32-bit stack?
	jnz restore_nocheck		# all right, normal return
#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16 bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif
	/*
	 * If returning to userspace with a 16-bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can work around to make
	 * dosemu and wine happy.
	 */
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS(CLBR_EAX)
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)
	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)
	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	cmpl $0, %eax
	jne resume_userspace		# ret != 0 -> running under PTRACE_SYSEMU,
					# so must skip actual syscall
	movl PT_ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)
	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
#define FIXUP_ESPFIX_STACK \
	/* since we are on the wrong stack, we can't do this in C :( */ \
	PER_CPU(gdt_page, %ebx); \
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
	addl %esp, %eax; \
	pushl $__KERNEL_DS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl %eax; \
	CFI_ADJUST_CFA_OFFSET 4; \
	lss (%esp), %esp; \
	CFI_ADJUST_CFA_OFFSET -8;
#define UNWIND_ESPFIX_STACK \
	movl %ss, %eax; \
	/* see if on espfix stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 27f; \
	movl $__KERNEL_DS, %eax; \
	movl %eax, %ds; \
	movl %eax, %es; \
	/* switch to normal stack */ \
	FIXUP_ESPFIX_STACK; \
27:;
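/*
 * Background for the espfix machinery: "iret" to a 16-bit stack segment
 * restores only the low 16 bits of ESP and leaves the high bits as
 * whatever the kernel last had there, leaking kernel stack bits. The
 * workaround switches to a per-CPU GDT segment (GDT_ENTRY_ESPFIX_SS)
 * whose base patch_espfix_desc adjusts so that base + the truncated
 * offset still addresses the real stack; UNWIND_ESPFIX_STACK reverses
 * this if an exception arrives while we are still on that segment.
 */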
/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.section .rodata,"a"
ENTRY(interrupt)
.text

ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=0
.rept NR_IRQS
	ALIGN
 .if vector
	CFI_ADJUST_CFA_OFFSET -4
 .endif
1:	pushl $~(vector)
	CFI_ADJUST_CFA_OFFSET 4
	jmp common_interrupt
 .previous
	.long 1b
 .text
vector=vector+1
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
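/*
 * Each stub pushes the bitwise NOT of its vector number, so orig_eax is
 * negative for hardware interrupts and can't be confused with a system
 * call number; the parallel .rodata table built through "interrupt"
 * records one stub address per vector for use when the IDT is set up.
 */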
/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	ALIGN
common_interrupt:
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC
#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call smp_##name;		\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)
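/*
 * entry_arch.h instantiates BUILD_INTERRUPT once per SMP IPI; a
 * representative instantiation (illustrative, assuming the conventional
 * vector names) is BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR),
 * which pairs the entry stub with its smp_reschedule_interrupt() handler.
 */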
/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"
KPROBE_ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %fs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_FS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov %ecx, PT_FS(%esp)
	/*CFI_REL_OFFSET fs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(page_fault)
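/*
 * Note how error_code reuses the trap stub pushes: the handler address
 * pushed by each stub occupies what becomes the pt_regs %fs slot, so the
 * real %fs is pushed temporarily below the frame, popped into %ecx, and
 * only written to PT_FS once the handler address has been fetched into
 * %edi; the handler is then called with the pt_regs pointer in %eax and
 * the error code in %edx.
 */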
ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_CR0_INTO_EAX
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop(CLBR_ANY)
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	CFI_ADJUST_CFA_OFFSET 4
	call math_emulate
	addl $4, %esp
	CFI_ADJUST_CFA_OFFSET -4
	jmp ret_from_exception
	CFI_ENDPROC
END(device_not_available)
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label) \
	cmpw $__KERNEL_CS,4(%esp); \
	jne ok; \
label: \
	movl TSS_sysenter_sp0+offset(%esp),%esp; \
	CFI_DEF_CFA esp, 0; \
	CFI_UNDEFINED eip; \
	pushfl; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl $__KERNEL_CS; \
	CFI_ADJUST_CFA_OFFSET 4; \
	pushl $sysenter_past_esp; \
	CFI_ADJUST_CFA_OFFSET 4; \
	CFI_REL_OFFSET eip, 0
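/*
 * The offset parameter counts the words already sitting on the wrong
 * stack: 12 bytes when a debug trap or NMI hits directly on the sysenter
 * instruction (eflags, cs, eip), and 24 bytes in the nmi_debug_stack_check
 * case below, where the NMI's own three words are stacked on top of the
 * debug handler's three.
 */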
KPROBE_ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(debug)
/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
KPROBE_ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK(12, nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK(24, nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_espfix_stack:
	/*
	 * We have a RING0_INT_FRAME here.
	 *
	 * Create the pointer to lss back.
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
KPROBE_END(nmi)
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_syscall_ret)
	sti
	sysexit
END(native_irq_enable_syscall_ret)
#endif
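/*
 * These native_* stubs are the baseline implementations behind the
 * paravirt hooks described at the top of the file: INTERRUPT_RETURN
 * resolves to native_iret and ENABLE_INTERRUPTS_SYSCALL_RET to
 * native_irq_enable_syscall_ret when no hypervisor overrides them.
 */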
KPROBE_ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
KPROBE_END(general_protection)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)
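/*
 * kernel_thread() starts the child here with the thread function in %ebx
 * and its argument in %edx; the argument is both copied to %eax and pushed
 * on the stack so the call works with either a regparm or a stack-based
 * calling convention, and the function's return value is handed to
 * do_exit() as the thread's exit code.
 */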
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entry point expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp			/* remove xen-provided frame */
	jmp sysenter_past_esp

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts.
	 */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb 1f
	cmpl $xen_iret_end_crit,%eax
	jae 1f
	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc			# EAX != 0 => Category 2 (Bad IRET)
5:	pushl $0			# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)
#endif	/* CONFIG_XEN */
.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)