
/*
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16-byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - orig_eax
 *	2C(%esp) - %eip
 *	30(%esp) - %cs
 *	34(%esp) - %eflags
 *	38(%esp) - %oldesp
 *	3C(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
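/*
 * That layout is exactly struct pt_regs.  The hardware pushes the top
 * five words (ss/esp/eflags/cs/eip) on the ring transition, each entry
 * stub pushes orig_eax (the syscall number, or a fake value for
 * exceptions), and SAVE_ALL below pushes the segment and integer
 * registers, in reverse of the table's order (the stack grows down).
 * The PT_* offsets used throughout (PT_EAX, PT_EFLAGS, ...) are
 * generated from pt_regs by asm-offsets and name these same slots.
 */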
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit	syscall_trace_entry
#define sysexit_audit	syscall_exit_work
#endif

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
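/*
 * As a rough illustration, on a native (non-paravirt) build these
 * macros reduce to the plain instructions named above:
 *
 *	DISABLE_INTERRUPTS(CLBR_ANY)	->	cli
 *	ENABLE_INTERRUPTS(CLBR_ANY)	->	sti
 *	INTERRUPT_RETURN		->	iret
 *	ENABLE_INTERRUPTS_SYSEXIT	->	sti; sysexit
 *
 * With CONFIG_PARAVIRT each site is instead emitted as a patchable
 * (possibly inlined) call into the hypervisor's implementation.
 */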
#define nr_syscalls ((syscall_table_size)/4)

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_nocheck
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

.macro SAVE_ALL
	cld
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0;*/
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0;*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0;*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
.endm
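/*
 * On exit from SAVE_ALL the frame matches pt_regs: 0(%esp) holds the
 * saved %ebx, and so on up the layout in the header comment.  %ds/%es
 * have been reloaded with __USER_DS and %fs with the per-cpu segment,
 * so it is safe to call C code from here on.
 */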
.macro RESTORE_INT_REGS
	popl %ebx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebx
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ecx
	popl %edx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edx
	popl %esi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE esi
	popl %edi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edi
	popl %ebp
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebp
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS
	RESTORE_INT_REGS
1:	popl %ds
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE ds;*/
2:	popl %es
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE es;*/
3:	popl %fs
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE fs;*/
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
.endm
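/*
 * The segment pops at 1:/2:/3: can fault if userspace managed to leave
 * a bogus selector in the saved slot.  The __ex_table entries route
 * such faults to 4:/5:/6:, which overwrite the offending saved value
 * with 0 (the null selector always loads) and retry the pop.
 */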
.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm
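/*
 * These three macros describe the stack to the DWARF unwinder in the
 * three shapes an entry point can see: a bare exception frame
 * (eip/cs/eflags, hence a CFA of 3*4 bytes), the same plus a hardware
 * error code on top (4*4 bytes), and a completed pt_regs frame.
 */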
ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up for TRACE_IRQS_OFF to be
	 * callable; we enable interrupts again almost immediately at
	 * that point anyway.
	 */
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	orl $X86_EFLAGS_IF, (%esp)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
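	/*
	 * A sketch of that arithmetic, assuming thread_info sits at the
	 * base of the THREAD_SIZE-sized kernel stack:
	 *
	 *	%esp        = sp0 - 4*4		(the four words just pushed)
	 *	sp0         = stack_top - 8	(copy_thread)
	 *	thread_info = stack_top - THREAD_SIZE
	 *
	 * so &current_thread_info()->sysenter_return works out to
	 * (TI_sysenter_return - THREAD_SIZE + 8 + 4*4)(%esp).
	 */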
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
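/*
 * %ebp still holds the user stack pointer here.  The comparison below
 * rejects it unless all four bytes of the word at (%ebp) lie below
 * __PAGE_OFFSET (hence the -3), and the __ex_table entry catches the
 * case where the load at 1: faults anyway.
 */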
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous

	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov  PT_FS(%esp), %fs
	ENABLE_INTERRUPTS_SYSEXIT
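/*
 * sysexit resumes userspace with EIP taken from %edx and ESP from
 * %ecx, which is why PT_EIP and PT_OLDESP are loaded into those
 * registers above; %ebp is zeroed so no kernel value leaks out.
 */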
#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $0,%eax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	inc %eax		/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
ENDPROC(ia32_sysenter_target)
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
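	/*
	 * After the three merges above, %al holds CS (so its RPL bits),
	 * %ah holds SS (so its TI/LDT bit), and EFLAGS.VM survives in
	 * the high bits, letting one masked compare detect "user-mode
	 * return with an LDT stack segment".
	 */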
restore_nocheck:
	TRACE_IRQS_IRET
restore_nocheck_notrace:
	RESTORE_REGS
	addl $4, %esp			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

	/*
	 * If returning to userspace with a 16-bit stack, try to fix
	 * the higher word of ESP, as the CPU won't restore it.  This
	 * is an "official" bug of all the x86-compatible CPUs, which
	 * we can try to work around to make dosemu and wine happy.
	 */
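	/*
	 * A sketch of the trick: patch_espfix_desc() (user ESP in %eax,
	 * kernel stack pointer in %edx) rewrites the base of the
	 * GDT_ENTRY_ESPFIX_SS descriptor and hands back in %eax a stack
	 * value such that __ESPFIX_SS:%eax addresses the same iret
	 * frame, but with the high 16 bits of ESP already equal to what
	 * userspace expects.  The iret then runs on that alias, so the
	 * high word the CPU fails to restore is the right one anyway.
	 * FIXUP_ESPFIX_STACK below performs the inverse mapping.
	 */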
	movl PT_OLDESP(%esp), %eax
	movl %esp, %edx
	call patch_espfix_desc
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	DISABLE_INTERRUPTS(CLBR_EAX)
	TRACE_IRQS_OFF
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)
	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)
	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $_TIF_WORK_SYSCALL_EXIT, %cl
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
.macro FIXUP_ESPFIX_STACK
	/* since we are on the wrong stack, we can't do this in C :( */
	PER_CPU(gdt_page, %ebx)
	GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
	addl %esp, %eax
	pushl $__KERNEL_DS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	lss (%esp), %esp
	CFI_ADJUST_CFA_OFFSET -8
.endm

.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
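/*
 * A sketch of the packing math: each "pushl $imm8" is 2 bytes and each
 * short "jmp 2f" is 2 bytes, so six 4-byte stubs plus the seventh push
 * (2 bytes) and the shared near "jmp common_interrupt" (5 bytes) come
 * to 31 bytes, which fits the 32-byte chunk.  Keeping the pushed value
 * in signed byte range is what keeps each push at 2 bytes.
 */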
.section .init.rodata,"a"
ENTRY(interrupt)
.text

.p2align 5
.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl $(~vector+0x80)		/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 4
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
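/*
 * For example (as entry_arch.h does for the reschedule IPI), the
 * expansion of BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 * is an entry stub that pushes the negated vector number and calls
 * smp_reschedule_interrupt() with the pt_regs pointer in %eax.
 */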
/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_device_not_available
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	ud2				# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entry point expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical region in
	 * xen_iret_direct, after we've re-enabled events and checked
	 * for pending events.  This simulates the iret instruction's
	 * behaviour, where it delivers a pending interrupt when
	 * enabling interrupts.
	 */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb 1f
	cmpl $xen_iret_end_crit,%eax
	jae 1f

	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

	# Hypervisor uses this for application faults while it executes.
	# We get here for two reasons:
	#  1. Fault while reloading DS, ES, FS or GS
	#  2. Fault while executing IRET
	# Category 1 we fix up by reattempting the load, and zeroing the segment
	# register if the load fails.
	# Category 2 we fix up by jumping to do_iret_error.  We cannot use the
	# normal Linux return path in this case, because if we use the IRET
	# hypercall to pop the stack frame we end up in an infinite loop of
	# failsafe callbacks.
	# We distinguish between categories by maintaining a status value in EAX.
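	# Concretely: %eax is set to 1 before the segment reloads below;
	# if one of them faults, its fixup (6:-9:) zeroes both the saved
	# selector and %eax before retrying.  So at the testl, %eax == 0
	# means a segment load faulted (Category 1), while %eax == 1 means
	# the original fault must have been the IRET itself (Category 2).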
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl $0		# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)
#endif	/* CONFIG_XEN */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)
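/*
 * In both the dynamic variant above and the static one below, the
 * three caller-saved pushes mean that 0xc(%esp) is mcount's own return
 * address (backed up by MCOUNT_INSN_SIZE to get the instrumented
 * function's ip), while 0x4(%ebp) is the parent's return address; the
 * latter relies on mcount being called only after the prologue has
 * set up %ebp.
 */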
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)
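/*
 * prepare_ftrace_return (called above) replaces the instrumented
 * function's return address on the stack with return_to_handler, so
 * function exit lands here.  The pushl $0 reserves the slot at
 * 0xc(%esp) into which ftrace_return_to_handler's result (the
 * original return address) is stored, letting the final ret resume
 * the real caller.
 */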
.globl return_to_handler
return_to_handler:
	pushl $0
	pushl %eax
	pushl %ecx
	pushl %edx
	call ftrace_return_to_handler
	movl %eax, 0xc(%esp)
	popl %edx
	popl %ecx
	popl %eax
	ret
#endif
.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %fs's slot on the stack */
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_REGISTER es, ecx*/
	movl PT_FS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	mov  %ecx, PT_FS(%esp)
	/*CFI_REL_OFFSET fs, ES*/
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)
/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $__KERNEL_CS
	CFI_ADJUST_CFA_OFFSET 4
	pushl $sysenter_past_esp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
.endm
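/*
 * The offset parameter compensates for the words already on the
 * sysenter stack when the macro runs: 12 when the trap itself hit the
 * sysenter instruction (its own eflags/cs/eip), 24 in the
 * NMI-on-debug-path case below, where both the debug trap's and the
 * NMI's three words are present.
 */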
ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_nocheck_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct
nmi_espfix_stack:
	/*
	 * We have a RING0_INT_FRAME here.
	 *
	 * Build the ss:esp far pointer that the lss at the end uses to
	 * get back onto the espfix stack.
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addw $4, (%esp)
	/* copy the 12-byte iret frame */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)
ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(general_protection)

/*
 * End of kprobes section
 */
	.popsection