/*
 * entry_32.S
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit		syscall_trace_entry
#define sysexit_audit		syscall_exit_work
#endif

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and kernel only uses it for stack
 * canary which is required to be at %gs:20 by gcc.  Read the comment
 * at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl_cfi $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl_cfi %gs
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl_cfi %gs
	/*CFI_RESTORE gs*/
  .if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0;*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0;*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0;*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
.endm
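
/*
 * A note on the fixups in RESTORE_REGS below: each popl of a userspace
 * segment register can fault if the saved selector has become invalid.
 * The __ex_table entries pair every faulting pop (labels 1-3) with a
 * fixup (labels 4-6) that overwrites the bad selector on the stack
 * with 0 and retries the pop, so a bogus user segment can't take the
 * kernel down with it.
 */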
.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl_cfi %ds
	/*CFI_RESTORE ds;*/
2:	popl_cfi %es
	/*CFI_RESTORE es;*/
3:	popl_cfi %fs
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
	POP_GS_EX
.endm

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202		# Reset kernel eflags
	popfl_cfi
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace
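
/*
 * The EFLAGS/CS mix above works because the RPL of the interrupted
 * code segment sits in the low two bits of %al while the VM flag
 * (X86_EFLAGS_VM) is up in bit 17, so one masked compare covers both
 * "returning to vm86 mode" and "returning to user mode (RPL ==
 * USER_RPL)" at once; anything strictly below USER_RPL has to be a
 * kernel context.
 */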
ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
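	/*
	 * On sysenter %esp comes from MSR_IA32_SYSENTER_ESP, which
	 * enable_sep_cpu() points just past the per-cpu TSS; the
	 * negative TSS_sysenter_sp0 offset below reaches back into
	 * tss.sp0, the task's real kernel stack.  By the vsyscall-page
	 * convention the user stack pointer arrives in %ebp (hence the
	 * CFI_REGISTER esp, ebp annotation above).
	 */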
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state to call TRACE_IRQS_OFF has been set up -
	 * and we immediately enable interrupts at that point anyway.
	 */
	pushl_cfi $__USER_DS
	/*CFI_REL_OFFSET ss, 0*/
	pushl_cfi %ebp
	CFI_REL_OFFSET esp, 0
	pushfl_cfi
	orl $X86_EFLAGS_IF, (%esp)
	pushl_cfi $__USER_CS
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0

	pushl_cfi %eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
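	/*
	 * The bound is __PAGE_OFFSET-3 rather than __PAGE_OFFSET so that
	 * the whole 4-byte read at (%ebp) stays below the kernel mapping;
	 * if the load faults anyway (e.g. an unmapped user page), the
	 * __ex_table entry above routes it to syscall_fault instead of
	 * letting it oops.
	 */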
	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
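
/*
 * sysexit resumes userspace with %edx -> EIP and %ecx -> ESP, which is
 * why sysenter_exit below reloads the saved user EIP/ESP into exactly
 * those two registers; %ebp is zeroed so no kernel value leaks back
 * to userspace.
 */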
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	pushl_cfi %ebx
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $0,%eax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	inc %eax		/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

/*
 * syscall stub including irq exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl_cfi %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
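	# The mix below packs EFLAGS (for the VM bit), SS (TI bit, shifted
	# into %ah) and CS (RPL bits in %al) into one register so a single
	# compare can spot the one case that needs help: an iret back to
	# user space (RPL == USER_RPL) on an LDT-based stack segment.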
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack.  The CPU will not
 * restore the high word of ESP for us on executing iret...  This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy.  We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
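/*
 * Worked example with made-up numbers: suppose the kernel %esp here is
 * 0xc1234f00 and the saved user ESP is 0x00005678.  Then the "new"
 * ESP built below is 0x00004f00 (user high word, kernel low word),
 * the offset is 0xc1234f00 - 0x00004f00 = 0xc1230000, and 0xc123 is
 * patched into bits 16..31 of the GDT_ESPFIX_SS base.  After the lss,
 * segment base + %esp = 0xc1230000 + 0x00004f00 still addresses the
 * same kernel stack, but the high word of %esp already holds the user
 * value that the 16-bit-SS iret would otherwise leave unrestored.
 */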
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl_cfi %ecx
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall exit tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL0(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL1(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL2(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%ecx; \
	movl (PT_ECX+4)(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL3(name) \
	ALIGN; \
ptregs_##name: \
	CFI_STARTPROC; \
	leal 4(%esp),%eax; \
	pushl_cfi %eax; \
	movl PT_EDX(%eax),%ecx; \
	movl PT_ECX(%eax),%edx; \
	movl PT_EBX(%eax),%eax; \
	call sys_##name; \
	addl $4,%esp; \
	CFI_ADJUST_CFA_OFFSET -4; \
	ret; \
	CFI_ENDPROC; \
ENDPROC(ptregs_##name)

PTREGSCALL1(iopl)
PTREGSCALL0(fork)
PTREGSCALL0(vfork)
PTREGSCALL3(execve)
PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
PTREGSCALL2(vm86)
PTREGSCALL1(vm86old)
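
/*
 * The stubs above lean on the 32-bit kernel's -mregparm=3 ABI: the
 * first three C arguments travel in %eax, %edx and %ecx, and 4(%esp)
 * skips the return address pushed by the call through sys_call_table.
 * PTREGSCALL0-2 can therefore drop the pt_regs pointer into the next
 * free argument register and tail-jump to the C function, while
 * PTREGSCALL3 must pass it as a fourth, stack-based argument and so
 * needs a real call plus stack cleanup.
 */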
/* Clone is an oddball.  The 4th arg is in %edi */
	ALIGN;
ptregs_clone:
	CFI_STARTPROC
	leal 4(%esp),%eax
	pushl_cfi %eax
	pushl_cfi PT_EDI(%eax)
	movl PT_EDX(%eax),%ecx
	movl PT_ECX(%eax),%edx
	movl PT_EBX(%eax),%eax
	call sys_clone
	addl $8,%esp
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(ptregs_clone)

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack.  This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	pushl_cfi %eax
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.endm

.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .section .entry.text, "ax"
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
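
/*
 * Each stub pushes $(~vector+0x80), which equals 0x7f-vector: for any
 * vector up to 0xff that fits in a signed byte, so the pushl encodes
 * in just 2 bytes and 7 stubs fit comfortably in a 32-byte chunk.
 * common_interrupt below turns the saved slot into ~vector (always in
 * [-256,-1]) with its addl $-0x80, and do_IRQ recovers the vector
 * number as ~orig_eax.  Vector 0x31, say, is pushed as 0x4e and
 * reaches do_IRQ as 0xffffffce, i.e. ~0x31.
 */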
/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl_cfi $~(nr);		\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_error
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
664:
.previous
#else
	pushl_cfi $do_simd_coprocessor_error
#endif
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_overflow
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_bounds
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_invalid_op
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_coprocessor_segment_overrun
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl_cfi $do_invalid_TSS
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl_cfi $do_segment_not_present
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl_cfi $do_stack_segment
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl_cfi $do_alignment_check
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl_cfi $0			# no error code
	pushl_cfi $do_divide_error
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi machine_check_vector
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl_cfi $0
	pushl_cfi $do_spurious_interrupt_bug
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
/*
 * End of kprobes section
 */
	.popsection
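
/*
 * kernel_thread() (see process.c) hands the thread function to this
 * helper in %esi and its argument in %edi via the pt_regs it forges
 * for the child.  Under -mregparm=3 the first C argument travels in
 * %eax, hence the movl %edi,%eax before the indirect call; the pushed
 * 0 gives the unwinder a terminating fake return address.  If fn ever
 * returns, its return value (still in %eax) becomes the do_exit()
 * status.
 */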
ENTRY(kernel_thread_helper)
	pushl $0		# fake return address for unwinder
	CFI_STARTPROC
	movl %edi,%eax
	call *%esi
	call do_exit
	ud2			# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entry point expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl_cfi $0
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events.  This simulates
	   iret instruction's behaviour where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb 1f
	cmpl $xen_iret_end_crit,%eax
	jae 1f

	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error.  We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl_cfi %eax
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl_cfi %eax
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl_cfi $0		# EAX == 0 => Category 1 (Bad segment)
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
		xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)
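
/*
 * With CONFIG_DYNAMIC_FTRACE every mcount call site is patched to a
 * nop at boot, so mcount itself is only reached in the tiny window
 * before patching and can simply return.  When tracing is switched
 * on, call sites are re-patched to call ftrace_caller below, and the
 * "call ftrace_stub" at the ftrace_call label is itself rewritten to
 * point at the active tracer.
 */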
ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)
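
/*
 * prepare_ftrace_return() - called above with the traced function's
 * ip (return address minus MCOUNT_INSN_SIZE) in %edx, the location of
 * the parent's return address in %eax and the frame pointer in %ecx -
 * replaces the parent's return address with return_to_handler, so the
 * traced function "returns" below instead.  ftrace_return_to_handler()
 * hands back the original return address in %eax, which is stashed in
 * %ecx (so the real %eax return value can be restored) and jumped
 * through.
 */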
.globl return_to_handler
return_to_handler:
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_page_fault
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl_cfi %fs
	/*CFI_REL_OFFSET fs, 0*/
	pushl_cfi %es
	/*CFI_REL_OFFSET es, 0*/
	pushl_cfi %ds
	/*CFI_REL_OFFSET ds, 0*/
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack.  Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl_cfi
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
.endm

ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty.  It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack.  So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl_cfi %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl_cfi %eax
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl_cfi %eax
	movl %esp,%eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl_cfi %eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl_cfi %eax
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * create the pointer to lss back
	 */
	pushl_cfi %ss
	pushl_cfi %esp
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl_cfi 16(%esp)
	.endr
	pushl_cfi %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl_cfi $do_general_protection
	jmp error_code
	CFI_ENDPROC
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	RING0_EC_FRAME
	pushl_cfi $do_async_page_fault
	jmp error_code
	CFI_ENDPROC
END(async_page_fault)
#endif

/*
 * End of kprobes section
 */
	.popsection