entry_32.S

/*
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
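
/*
 * Note: the PT_* offsets used throughout this file (PT_EBX, PT_EIP,
 * PT_OLDESP, ...) are generated from struct pt_regs by the asm-offsets
 * machinery and correspond to the layout pictured above; the two must
 * stay in sync.
 */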

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit	syscall_trace_entry
#define sysexit_audit	syscall_exit_work
#endif

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl %gs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl %gs
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE gs*/
  .if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */
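
/*
 * Summary of the two variants above: with CONFIG_X86_32_LAZY_GS the
 * entry code never touches %gs itself (PUSH_GS only keeps the pt_regs
 * slot occupied with a zero) and user %gs is switched lazily elsewhere;
 * without it, %gs is saved and reloaded here so the stack-protector
 * canary segment can be installed via SET_KERNEL_GS.
 */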

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0;*/
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0;*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0;*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm
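
/*
 * After SAVE_ALL the stack matches the pt_regs picture at the top of
 * this file (orig_eax and the hardware iret frame sit above it), and
 * %ds/%es/%fs hold the flat kernel data and per-cpu segments.
 */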

.macro RESTORE_INT_REGS
	popl %ebx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebx
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ecx
	popl %edx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edx
	popl %esi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE esi
	popl %edi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edi
	popl %ebp
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebp
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl %ds
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE ds;*/
2:	popl %es
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE es;*/
3:	popl %fs
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
	POP_GS_EX
.endm
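
/*
 * The __ex_table entries above pair each faulting pop with a fixup:
 * if reloading a segment register faults (e.g. on a stale user
 * selector), the fixup overwrites the saved value with 0 and retries,
 * so a bad user segment cannot oops the kernel on the exit path.
 */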

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm
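
/*
 * The CFI_* annotations used by the frame macros above come from
 * asm/dwarf2.h and emit only DWARF unwind directives; they generate
 * no instructions, so the frames cost nothing at run time.
 */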

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC

/*
 * SYSENTER_RETURN points to after the "sysenter" instruction in
 * the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.
 */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up for TRACE_IRQS_OFF to be
	 * callable; we enable interrupts again immediately at that
	 * point anyway.
	 */
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	orl $X86_EFLAGS_IF, (%esp)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
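
	/*
	 * The cmpl against __PAGE_OFFSET-3 above ensures the 4-byte read
	 * at (%ebp) cannot cross the user/kernel boundary; if the read
	 * faults anyway, the __ex_table entry diverts to syscall_fault.
	 */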

	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT
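
	/*
	 * sysexit has a fixed register contract: it returns to the user
	 * %eip held in %edx and the user %esp held in %ecx, which is why
	 * both were loaded from pt_regs just above.
	 */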

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $0,%eax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	inc %eax		/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS 4			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS(CLBR_EAX)
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $_TIF_WORK_SYSCALL_EXIT, %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%eax; \
	jmp sys_##name;

PTREGSCALL(iopl)
PTREGSCALL(fork)
PTREGSCALL(clone)
PTREGSCALL(vfork)
PTREGSCALL(execve)
PTREGSCALL(sigaltstack)
PTREGSCALL(sigreturn)
PTREGSCALL(rt_sigreturn)
PTREGSCALL(vm86)
PTREGSCALL(vm86old)
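
/*
 * For illustration, PTREGSCALL(fork) expands to:
 *
 *	ptregs_fork:
 *		leal 4(%esp),%eax	# %eax = &pt_regs (skip return address)
 *		jmp sys_fork
 *
 * so each such sys_* handler receives a struct pt_regs pointer as its
 * first (regparm) argument.
 */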

.macro FIXUP_ESPFIX_STACK
	/* since we are on a wrong stack, we can't do this in C :( */
	PER_CPU(gdt_page, %ebx)
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
	addl %esp, %eax
	pushl $__KERNEL_DS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
.endm

.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm
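
/*
 * "espfix" background: iret to a 16-bit stack segment leaves the high
 * word of %esp unrestored, so such returns go out on the small per-cpu
 * __ESPFIX_SS stack instead.  FIXUP_ESPFIX_STACK recovers the original
 * linear %esp from the espfix GDT entry's base and switches back to
 * the kernel stack.
 */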

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
.section .init.rodata,"a"
ENTRY(interrupt)
.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl $(~vector+0x80)	/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 4
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
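
/*
 * Size check for the packing above: six of the seven stubs are a
 * 2-byte push plus a 2-byte short jmp to 2f, and the seventh is a
 * 2-byte push falling through into the shared 5-byte
 * "jmp common_interrupt", i.e. 31 bytes per 32-byte chunk.
 */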

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_device_not_available
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)

ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	ud2				# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)
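
/*
 * kernel_thread() builds a pt_regs with %ebx = the thread function,
 * %edx = its argument and %eip = kernel_thread_helper, so the helper
 * above calls *%ebx with the argument both in %eax (regparm) and on
 * the stack, then feeds the return value to do_exit().
 */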

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events.  This simulates
	   iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb 1f
	cmpl $xen_iret_end_crit,%eax
	jae 1f

	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl $0		# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)
#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
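
/*
 * mcount calling convention, which the code above relies on: gcc
 * emits "call mcount" after each function prologue, so following the
 * three pushes 0xc(%esp) is mcount's own return address (a point
 * inside the instrumented function) and 4(%ebp) is the instrumented
 * function's return address; subtracting MCOUNT_INSN_SIZE turns the
 * former into the address of the call site itself.
 */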

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl $0
	pushl %eax
	pushl %ecx
	pushl %edx
	call ftrace_return_to_handler
	movl %eax, 0xc(%esp)
	popl %edx
	popl %ecx
	popl %eax
	ret
#endif

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $__KERNEL_CS
	CFI_ADJUST_CFA_OFFSET 4
	pushl $sysenter_past_esp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
.endm

ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * Build the %ss:%esp pointer with which we can lss back to
	 * the espfix stack once do_nmi returns.
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(general_protection)

/*
 * End of kprobes section
 */
	.popsection