entry_32.S

/*
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
* I changed all the .align's to 4 (16 byte alignment), as that's faster
* on a 486.
*
* Stack layout in 'syscall_exit':
* ptrace needs to have all regs on the stack.
* if the order here is changed, it needs to be
* updated in fork.c:copy_process, signal.c:do_signal,
* ptrace.c and ptrace.h
*
* 0(%esp) - %ebx
* 4(%esp) - %ecx
* 8(%esp) - %edx
* C(%esp) - %esi
* 10(%esp) - %edi
* 14(%esp) - %ebp
* 18(%esp) - %eax
* 1C(%esp) - %ds
* 20(%esp) - %es
* 24(%esp) - %fs
* 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
* 2C(%esp) - orig_eax
* 30(%esp) - %eip
* 34(%esp) - %cs
* 38(%esp) - %eflags
* 3C(%esp) - %oldesp
* 40(%esp) - %oldss
*
* "current" is in register %ebx during any slow entries.
*/
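/*
* As a rough C sketch (illustrative only; the real definition lives in
* arch/x86/include/asm/ptrace.h), the layout above corresponds to:
*
*	struct pt_regs {
*		unsigned long bx, cx, dx, si, di, bp, ax;
*		unsigned long ds, es, fs, gs;
*		unsigned long orig_ax;
*		unsigned long ip, cs, flags, sp, ss;
*	};
*
* i.e. after SAVE_ALL below, %esp points at such a structure.
*/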
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE 0x40000000
#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit syscall_trace_entry
#define sysexit_audit syscall_exit_work
#endif
.section .entry.text, "ax"
/*
* We use macros for low-level operations which need to be overridden
* for paravirtualization. The following will never clobber any registers:
* INTERRUPT_RETURN (aka. "iret")
* GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
* ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
*
* For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
* specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
* Allowing a register to be clobbered can shrink the paravirt replacement
* enough to patch inline, increasing performance.
*/
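/*
* For orientation, on native (non-paravirt) hardware these operations boil
* down to single instructions; a minimal C sketch of the native helpers,
* along the lines of asm/irqflags.h (shown here only as an illustration):
*
*	static inline void native_irq_disable(void)
*	{
*		asm volatile("cli" : : : "memory");
*	}
*
*	static inline void native_irq_enable(void)
*	{
*		asm volatile("sti" : : : "memory");
*	}
*/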
#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel restore_all
#endif
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off?
jz 1f
TRACE_IRQS_ON
1:
#endif
.endm
/*
* User gs save/restore
*
* %gs is used for userland TLS and kernel only uses it for stack
* canary which is required to be at %gs:20 by gcc. Read the comment
* at the top of stackprotector.h for more info.
*
* Local labels 98 and 99 are used.
*/
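/*
* A rough C sketch of what "stack canary at %gs:20" means; the real
* definitions live in asm/stackprotector.h and this is only illustrative:
*
*	struct stack_canary {
*		char __pad[20];
*		unsigned long canary;
*	};
*
* The canary field sits at offset 20, and %gs is pointed at a per-cpu
* instance of this structure, which is how gcc's -fstack-protector code
* finds the value at %gs:20.
*/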
#ifdef CONFIG_X86_32_LAZY_GS
/* unfortunately push/pop can't be no-op */
.macro PUSH_GS
pushl_cfi $0
.endm
.macro POP_GS pop=0
addl $(4 + \pop), %esp
CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm
/* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm
#else /* CONFIG_X86_32_LAZY_GS */
.macro PUSH_GS
pushl_cfi %gs
/*CFI_REL_OFFSET gs, 0*/
.endm
.macro POP_GS pop=0
98: popl_cfi %gs
/*CFI_RESTORE gs*/
.if \pop <> 0
add $\pop, %esp
CFI_ADJUST_CFA_OFFSET -\pop
.endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99: movl $0, (%esp)
jmp 98b
.popsection
_ASM_EXTABLE(98b,99b)
.endm
.macro PTGS_TO_GS
98: mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99: movl $0, PT_GS(%esp)
jmp 98b
.popsection
_ASM_EXTABLE(98b,99b)
.endm
.macro GS_TO_REG reg
movl %gs, \reg
/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
movl \reg, PT_GS(%esp)
/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
movl $(__KERNEL_STACK_CANARY), \reg
movl \reg, %gs
.endm
#endif /* CONFIG_X86_32_LAZY_GS */
.macro SAVE_ALL
cld
PUSH_GS
pushl_cfi %fs
/*CFI_REL_OFFSET fs, 0;*/
pushl_cfi %es
/*CFI_REL_OFFSET es, 0;*/
pushl_cfi %ds
/*CFI_REL_OFFSET ds, 0;*/
pushl_cfi %eax
CFI_REL_OFFSET eax, 0
pushl_cfi %ebp
CFI_REL_OFFSET ebp, 0
pushl_cfi %edi
CFI_REL_OFFSET edi, 0
pushl_cfi %esi
CFI_REL_OFFSET esi, 0
pushl_cfi %edx
CFI_REL_OFFSET edx, 0
pushl_cfi %ecx
CFI_REL_OFFSET ecx, 0
pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
movl $(__USER_DS), %edx
movl %edx, %ds
movl %edx, %es
movl $(__KERNEL_PERCPU), %edx
movl %edx, %fs
SET_KERNEL_GS %edx
.endm
.macro RESTORE_INT_REGS
popl_cfi %ebx
CFI_RESTORE ebx
popl_cfi %ecx
CFI_RESTORE ecx
popl_cfi %edx
CFI_RESTORE edx
popl_cfi %esi
CFI_RESTORE esi
popl_cfi %edi
CFI_RESTORE edi
popl_cfi %ebp
CFI_RESTORE ebp
popl_cfi %eax
CFI_RESTORE eax
.endm
.macro RESTORE_REGS pop=0
RESTORE_INT_REGS
1: popl_cfi %ds
/*CFI_RESTORE ds;*/
2: popl_cfi %es
/*CFI_RESTORE es;*/
3: popl_cfi %fs
/*CFI_RESTORE fs;*/
POP_GS \pop
.pushsection .fixup, "ax"
4: movl $0, (%esp)
jmp 1b
5: movl $0, (%esp)
jmp 2b
6: movl $0, (%esp)
jmp 3b
.popsection
_ASM_EXTABLE(1b,4b)
_ASM_EXTABLE(2b,5b)
_ASM_EXTABLE(3b,6b)
POP_GS_EX
.endm
.macro RING0_INT_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 3*4
/*CFI_OFFSET cs, -2*4;*/
CFI_OFFSET eip, -3*4
.endm
.macro RING0_EC_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 4*4
/*CFI_OFFSET cs, -2*4;*/
CFI_OFFSET eip, -3*4
.endm
.macro RING0_PTREGS_FRAME
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
CFI_OFFSET eip, PT_EIP-PT_OLDESP
/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
CFI_OFFSET eax, PT_EAX-PT_OLDESP
CFI_OFFSET ebp, PT_EBP-PT_OLDESP
CFI_OFFSET edi, PT_EDI-PT_OLDESP
CFI_OFFSET esi, PT_ESI-PT_OLDESP
CFI_OFFSET edx, PT_EDX-PT_OLDESP
CFI_OFFSET ecx, PT_ECX-PT_OLDESP
CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm
ENTRY(ret_from_fork)
CFI_STARTPROC
pushl_cfi %eax
call schedule_tail
GET_THREAD_INFO(%ebp)
popl_cfi %eax
pushl_cfi $0x0202 # Reset kernel eflags
popfl_cfi
jmp syscall_exit
CFI_ENDPROC
END(ret_from_fork)
/*
* Interrupt exit functions should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
/*
* Return to user mode is not as complex as all this looks,
* but we want the default path for a system call return to
* go as quickly as possible which is why some of this is
* less clear than it otherwise should be.
*/
# userspace resumption stub bypassing syscall exit tracing
ALIGN
RING0_PTREGS_FRAME
ret_from_exception:
preempt_stop(CLBR_ANY)
ret_from_intr:
GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
/*
* We can be coming here from a syscall done in the kernel space,
* e.g. a failed kernel_execve().
*/
movl PT_CS(%esp), %eax
andl $SEGMENT_RPL_MASK, %eax
#endif
cmpl $USER_RPL, %eax
jb resume_kernel # not returning to v8086 or userspace
ENTRY(resume_userspace)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
# int/exception return?
jne work_pending
jmp restore_all
END(ret_from_exception)
#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
jnz restore_all
need_resched:
movl TI_flags(%ebp), %ecx # need_resched set ?
testb $_TIF_NEED_RESCHED, %cl
jz restore_all
testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all
call preempt_schedule_irq
jmp need_resched
END(resume_kernel)
#endif
CFI_ENDPROC
/*
* End of kprobes section
*/
.popsection
/* SYSENTER_RETURN points to after the "sysenter" instruction in
the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
# sysenter call handler stub
ENTRY(ia32_sysenter_target)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA esp, 0
CFI_REGISTER esp, ebp
movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
/*
* Interrupts are disabled here, but we can't trace it until
* enough kernel state to call TRACE_IRQS_OFF can be called - but
* we immediately enable interrupts at that point anyway.
*/
pushl_cfi $__USER_DS
/*CFI_REL_OFFSET ss, 0*/
pushl_cfi %ebp
CFI_REL_OFFSET esp, 0
pushfl_cfi
orl $X86_EFLAGS_IF, (%esp)
pushl_cfi $__USER_CS
/*CFI_REL_OFFSET cs, 0*/
/*
* Push current_thread_info()->sysenter_return to the stack.
* A tiny bit of offset fixup is necessary - 4*4 means the 4 words
* pushed above; +8 corresponds to copy_thread's esp0 setting.
*/
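/*
* Put differently (a sketch of the arithmetic, not extra code): at this
* point %esp + 4*4 + 8 is the top of the kernel stack, which equals
* current_thread_info() + THREAD_SIZE, so
*
*	%esp + 4*4 + 8 - THREAD_SIZE + TI_sysenter_return
*
* is the address of current_thread_info()->sysenter_return.
*/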
pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
CFI_REL_OFFSET eip, 0
pushl_cfi %eax
SAVE_ALL
ENABLE_INTERRUPTS(CLBR_NONE)
/*
* Load the potential sixth argument from user stack.
* Careful about security.
*/
cmpl $__PAGE_OFFSET-3,%ebp
jae syscall_fault
1: movl (%ebp),%ebp
movl %ebp,PT_EBP(%esp)
_ASM_EXTABLE(1b,syscall_fault)
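/*
* The four instructions above, as a rough C sketch (variable names are
* illustrative, not the kernel's):
*
*	if (user_ebp >= __PAGE_OFFSET - 3)
*		return -EFAULT;
*	sixth_arg = *(unsigned long *)user_ebp;
*
* i.e. refuse any %ebp for which the 4-byte read could reach kernel
* addresses; if the read itself faults, the exception table entry above
* redirects to syscall_fault. %ebp is the user stack pointer passed in by
* the sysenter convention, and the word it points to is used as the sixth
* syscall argument.
*/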
GET_THREAD_INFO(%ebp)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz sysenter_audit
sysenter_do_call:
cmpl $(NR_syscalls), %eax
jae syscall_badsys
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp)
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx
jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
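/* (sysexit resumes user mode with the new %eip taken from %edx and the
new %esp from %ecx, which is why those two registers are loaded below) */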
movl PT_EIP(%esp), %edx
movl PT_OLDESP(%esp), %ecx
xorl %ebp,%ebp
TRACE_IRQS_ON
1: mov PT_FS(%esp), %fs
PTGS_TO_GS
ENABLE_INTERRUPTS_SYSEXIT
#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
jnz syscall_trace_entry
addl $4,%esp
CFI_ADJUST_CFA_OFFSET -4
/* %esi already in 8(%esp) 6th arg: 4th syscall arg */
/* %edx already in 4(%esp) 5th arg: 3rd syscall arg */
/* %ecx already in 0(%esp) 4th arg: 2nd syscall arg */
movl %ebx,%ecx /* 3rd arg: 1st syscall arg */
movl %eax,%edx /* 2nd arg: syscall number */
movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
call __audit_syscall_entry
pushl_cfi %ebx
movl PT_EAX(%esp),%eax /* reload syscall number */
jmp sysenter_do_call
sysexit_audit:
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jne syscall_exit_work
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY)
movl %eax,%edx /* second arg, syscall return value */
cmpl $-MAX_ERRNO,%eax /* is it an error ? */
setbe %al /* 1 if so, 0 if not */
movzbl %al,%eax /* zero-extend that */
call __audit_syscall_exit
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
jne syscall_exit_work
movl PT_EAX(%esp),%eax /* reload syscall return value */
jmp sysenter_exit
#endif
CFI_ENDPROC
.pushsection .fixup,"ax"
2: movl $0,PT_FS(%esp)
jmp 1b
.popsection
_ASM_EXTABLE(1b,2b)
PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)
/*
* syscall stub including irq exit should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
# system call handler stub
ENTRY(system_call)
RING0_INT_FRAME # can't unwind into user space anyway
pushl_cfi %eax # save orig_eax
SAVE_ALL
GET_THREAD_INFO(%ebp)
# system call tracing in operation / emulation
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
jnz syscall_trace_entry
cmpl $(NR_syscalls), %eax
jae syscall_badsys
syscall_call:
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp) # store the return value
syscall_exit:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
testl $_TIF_ALLWORK_MASK, %ecx # current->work
jne syscall_exit_work
restore_all:
TRACE_IRQS_IRET
restore_all_notrace:
movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
# are returning to the kernel.
# See comments in process.c:copy_thread() for details.
movb PT_OLDSS(%esp), %ah
movb PT_CS(%esp), %al
andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
CFI_REMEMBER_STATE
je ldt_ss # returning to user-space with LDT SS
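/*
* The EFLAGS/SS/CS mixing above implements, roughly, this C predicate
* (illustrative names):
*
*	take_espfix_path = !(eflags & X86_EFLAGS_VM) &&
*			   (ss & SEGMENT_TI_MASK) == SEGMENT_LDT &&
*			   (cs & SEGMENT_RPL_MASK) == USER_RPL;
*
* i.e. only a non-vm86 return to user mode with a stack segment in the
* LDT takes the ldt_ss (espfix) path below.
*/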
restore_nocheck:
RESTORE_REGS 4 # skip orig_eax/error_code
irq_return:
INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
pushl $0 # no error code
pushl $do_iret_error
jmp error_code
.previous
_ASM_EXTABLE(irq_return,iret_exc)
CFI_RESTORE_STATE
ldt_ss:
larl PT_OLDSS(%esp), %eax
jnz restore_nocheck
testl $0x00400000, %eax # returning to 32bit stack?
jnz restore_nocheck # all right, normal return
#ifdef CONFIG_PARAVIRT
/*
* The kernel can't run on a non-flat stack if paravirt mode
* is active. Rather than try to fixup the high bits of
* ESP, bypass this code entirely. This may break DOSemu
* and/or Wine support in a paravirt VM, although the option
* is still available to implement the setting of the high
* 16-bits in the INTERRUPT_RETURN paravirt-op.
*/
cmpl $0, pv_info+PARAVIRT_enabled
jne restore_nocheck
#endif
/*
* Setup and switch to ESPFIX stack
*
* We're returning to userspace with a 16 bit stack. The CPU will not
* restore the high word of ESP for us on executing iret... This is an
* "official" bug of all the x86-compatible CPUs, which we can work
* around to make dosemu and wine happy. We do this by preloading the
* high word of ESP with the high word of the userspace ESP while
* compensating for the offset by changing to the ESPFIX segment with
* a base address that matches for the difference.
*/
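/*
* The pointer juggling below amounts to this C sketch (illustrative names
* only):
*
*	new_esp     = (user_esp & 0xffff0000) | (kernel_esp & 0xffff);
*	espfix_base = kernel_esp - new_esp;
*
* The low 16 bits of espfix_base are zero; its upper bits are patched into
* the GDT_ESPFIX_SS descriptor, so that with %ss switched to that segment,
* espfix_base + new_esp still addresses the current kernel stack, while
* the high word of %esp already carries the user value that iret will not
* restore.
*/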
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
mov %esp, %edx /* load kernel esp */
mov PT_OLDESP(%esp), %eax /* load userspace esp */
mov %dx, %ax /* eax: new kernel esp */
sub %eax, %edx /* offset (low word is 0) */
shr $16, %edx
mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
pushl_cfi $__ESPFIX_SS
pushl_cfi %eax /* new kernel esp */
/* Disable interrupts, but do not irqtrace this section: we
* will soon execute iret and the tracer was already set to
* the irqstate after the iret */
DISABLE_INTERRUPTS(CLBR_EAX)
lss (%esp), %esp /* switch to espfix segment */
CFI_ADJUST_CFA_OFFSET -8
jmp restore_nocheck
CFI_ENDPROC
ENDPROC(system_call)
# perform work that needs to be done immediately before resumption
ALIGN
RING0_PTREGS_FRAME # can't unwind into user space anyway
work_pending:
testb $_TIF_NEED_RESCHED, %cl
jz work_notifysig
work_resched:
call schedule
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
# setting need_resched or sigpending
# between sampling and the iret
TRACE_IRQS_OFF
movl TI_flags(%ebp), %ecx
andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
# than syscall tracing?
jz restore_all
testb $_TIF_NEED_RESCHED, %cl
jnz work_resched
work_notifysig: # deal with pending signals and
# notify-resume requests
#ifdef CONFIG_VM86
testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
movl %esp, %eax
jne work_notifysig_v86 # returning to kernel-space or
# vm86-space
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
movb PT_CS(%esp), %bl
andb $SEGMENT_RPL_MASK, %bl
cmpb $USER_RPL, %bl
jb resume_kernel
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace
ALIGN
work_notifysig_v86:
pushl_cfi %ecx # save ti_flags for do_notify_resume
call save_v86_state # %eax contains pt_regs pointer
popl_cfi %ecx
movl %eax, %esp
#else
movl %esp, %eax
#endif
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
movb PT_CS(%esp), %bl
andb $SEGMENT_RPL_MASK, %bl
cmpb $USER_RPL, %bl
jb resume_kernel
xorl %edx, %edx
call do_notify_resume
jmp resume_userspace
END(work_pending)
# perform syscall entry tracing
ALIGN
syscall_trace_entry:
movl $-ENOSYS,PT_EAX(%esp)
movl %esp, %eax
call syscall_trace_enter
/* What it returned is what we'll actually use. */
cmpl $(NR_syscalls), %eax
jnae syscall_call
jmp syscall_exit
END(syscall_trace_entry)
# perform syscall exit tracing
ALIGN
syscall_exit_work:
testl $_TIF_WORK_SYSCALL_EXIT, %ecx
jz work_pending
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call
# schedule() instead
movl %esp, %eax
call syscall_trace_leave
jmp resume_userspace
END(syscall_exit_work)
CFI_ENDPROC
RING0_INT_FRAME # can't unwind into user space anyway
syscall_fault:
GET_THREAD_INFO(%ebp)
movl $-EFAULT,PT_EAX(%esp)
jmp resume_userspace
END(syscall_fault)
syscall_badsys:
movl $-ENOSYS,PT_EAX(%esp)
jmp resume_userspace
END(syscall_badsys)
CFI_ENDPROC
/*
* End of kprobes section
*/
.popsection
/*
* System calls that need a pt_regs pointer.
*/
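/*
* The PTREGSCALL* macros below generate tiny stubs that materialize a
* pointer to the saved pt_regs (4 bytes above %esp, because the indirect
* call into the stub pushed a return address) and pass it, together with
* the original syscall arguments, to the C handler in the kernel's 32-bit
* register-argument order (%eax, %edx, %ecx). A rough sketch of the C
* side (prototypes are illustrative, not copied from asm/syscalls.h):
*
*	long sys_vfork(struct pt_regs *regs);
*	long sys_iopl(unsigned int level, struct pt_regs *regs);
*	long sys_sigaltstack(const stack_t *uss, stack_t *uoss,
*			     struct pt_regs *regs);
*/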
#define PTREGSCALL0(name) \
ENTRY(ptregs_##name) ; \
leal 4(%esp),%eax; \
jmp sys_##name; \
ENDPROC(ptregs_##name)
#define PTREGSCALL1(name) \
ENTRY(ptregs_##name) ; \
leal 4(%esp),%edx; \
movl (PT_EBX+4)(%esp),%eax; \
jmp sys_##name; \
ENDPROC(ptregs_##name)
#define PTREGSCALL2(name) \
ENTRY(ptregs_##name) ; \
leal 4(%esp),%ecx; \
movl (PT_ECX+4)(%esp),%edx; \
movl (PT_EBX+4)(%esp),%eax; \
jmp sys_##name; \
ENDPROC(ptregs_##name)
#define PTREGSCALL3(name) \
ENTRY(ptregs_##name) ; \
CFI_STARTPROC; \
leal 4(%esp),%eax; \
pushl_cfi %eax; \
movl PT_EDX(%eax),%ecx; \
movl PT_ECX(%eax),%edx; \
movl PT_EBX(%eax),%eax; \
call sys_##name; \
addl $4,%esp; \
CFI_ADJUST_CFA_OFFSET -4; \
ret; \
CFI_ENDPROC; \
ENDPROC(ptregs_##name)
PTREGSCALL1(iopl)
PTREGSCALL0(fork)
PTREGSCALL0(vfork)
PTREGSCALL3(execve)
PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
PTREGSCALL2(vm86)
PTREGSCALL1(vm86old)
/* Clone is an oddball. The 4th arg is in %edi */
ENTRY(ptregs_clone)
CFI_STARTPROC
leal 4(%esp),%eax
pushl_cfi %eax
pushl_cfi PT_EDI(%eax)
movl PT_EDX(%eax),%ecx
movl PT_ECX(%eax),%edx
movl PT_EBX(%eax),%eax
call sys_clone
addl $8,%esp
CFI_ADJUST_CFA_OFFSET -8
ret
CFI_ENDPROC
ENDPROC(ptregs_clone)
.macro FIXUP_ESPFIX_STACK
/*
* Switch back from ESPFIX stack to the normal zerobased stack
*
* We can't call C functions using the ESPFIX stack. This code reads
* the high word of the segment base from the GDT and switches to the
* normal stack and adjusts ESP with the matching offset.
*/
/* fixup the stack */
mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
shl $16, %eax
addl %esp, %eax /* the adjusted stack pointer */
pushl_cfi $__KERNEL_DS
pushl_cfi %eax
lss (%esp), %esp /* switch to the normal stack segment */
CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
movl %ss, %eax
/* see if on espfix stack */
cmpw $__ESPFIX_SS, %ax
jne 27f
movl $__KERNEL_DS, %eax
movl %eax, %ds
movl %eax, %es
/* switch to normal stack */
FIXUP_ESPFIX_STACK
27:
.endm
/*
* Build the entry stubs and pointer table with some assembler magic.
* We pack 7 stubs into a single 32-byte chunk, which will fit in a
* single cache line on all modern x86 implementations.
*/
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
.p2align 5
.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
.balign 32
.rept 7
.if vector < NR_VECTORS
.if vector <> FIRST_EXTERNAL_VECTOR
CFI_ADJUST_CFA_OFFSET -4
.endif
1: pushl_cfi $(~vector+0x80) /* Note: always in signed byte range */
.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
jmp 2f
.endif
.previous
.long 1b
.section .entry.text, "ax"
vector=vector+1
.endif
.endr
2: jmp common_interrupt
.endr
END(irq_entries_start)
.previous
END(interrupt)
.previous
/*
* the CPU automatically disables interrupts when executing an IRQ vector,
* so IRQ-flags tracing has to follow that:
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */
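/* (the stubs pushed ~vector + 0x80, so after this adjustment orig_eax
holds ~vector; the C handler recovers the vector number by complementing
it again, see do_IRQ) */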
SAVE_ALL
TRACE_IRQS_OFF
movl %esp,%eax
call do_IRQ
jmp ret_from_intr
ENDPROC(common_interrupt)
CFI_ENDPROC
/*
* Irq entries should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn) \
ENTRY(name) \
RING0_INT_FRAME; \
pushl_cfi $~(nr); \
SAVE_ALL; \
TRACE_IRQS_OFF \
movl %esp,%eax; \
call fn; \
jmp ret_from_intr; \
CFI_ENDPROC; \
ENDPROC(name)
#define BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(name, nr, smp_##name)
/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>
ENTRY(coprocessor_error)
RING0_INT_FRAME
pushl_cfi $0
pushl_cfi $do_coprocessor_error
jmp error_code
CFI_ENDPROC
END(coprocessor_error)
ENTRY(simd_coprocessor_error)
RING0_INT_FRAME
pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661: pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
.section .altinstr_replacement,"ax"
663: pushl $do_simd_coprocessor_error
664:
.previous
#else
pushl_cfi $do_simd_coprocessor_error
#endif
jmp error_code
CFI_ENDPROC
END(simd_coprocessor_error)
ENTRY(device_not_available)
RING0_INT_FRAME
pushl_cfi $-1 # mark this as an int
pushl_cfi $do_device_not_available
jmp error_code
CFI_ENDPROC
END(device_not_available)
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
iret
_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
ENTRY(native_irq_enable_sysexit)
sti
sysexit
END(native_irq_enable_sysexit)
#endif
ENTRY(overflow)
RING0_INT_FRAME
pushl_cfi $0
pushl_cfi $do_overflow
jmp error_code
CFI_ENDPROC
END(overflow)
ENTRY(bounds)
RING0_INT_FRAME
pushl_cfi $0
pushl_cfi $do_bounds
jmp error_code
CFI_ENDPROC
END(bounds)
ENTRY(invalid_op)
RING0_INT_FRAME
pushl_cfi $0
pushl_cfi $do_invalid_op
jmp error_code
CFI_ENDPROC
END(invalid_op)
ENTRY(coprocessor_segment_overrun)
RING0_INT_FRAME
pushl_cfi $0
pushl_cfi $do_coprocessor_segment_overrun
jmp error_code
CFI_ENDPROC
END(coprocessor_segment_overrun)
ENTRY(invalid_TSS)
RING0_EC_FRAME
pushl_cfi $do_invalid_TSS
jmp error_code
CFI_ENDPROC
END(invalid_TSS)
ENTRY(segment_not_present)
RING0_EC_FRAME
pushl_cfi $do_segment_not_present
jmp error_code
CFI_ENDPROC
END(segment_not_present)
ENTRY(stack_segment)
RING0_EC_FRAME
pushl_cfi $do_stack_segment
jmp error_code
CFI_ENDPROC
END(stack_segment)
ENTRY(alignment_check)
RING0_EC_FRAME
pushl_cfi $do_alignment_check
jmp error_code
CFI_ENDPROC
END(alignment_check)
ENTRY(divide_error)
RING0_INT_FRAME
pushl_cfi $0 # no error code
pushl_cfi $do_divide_error
jmp error_code
CFI_ENDPROC
END(divide_error)
#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
RING0_INT_FRAME
pushl_cfi $0
pushl_cfi machine_check_vector
jmp error_code
CFI_ENDPROC
END(machine_check)
#endif
ENTRY(spurious_interrupt_bug)
RING0_INT_FRAME
pushl_cfi $0
pushl_cfi $do_spurious_interrupt_bug
jmp error_code
CFI_ENDPROC
END(spurious_interrupt_bug)
/*
* End of kprobes section
*/
.popsection
ENTRY(kernel_thread_helper)
pushl $0 # fake return address for unwinder
CFI_STARTPROC
movl %edi,%eax
call *%esi
call do_exit
ud2 # padding for call trace
CFI_ENDPROC
ENDPROC(kernel_thread_helper)
#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
RING0_INT_FRAME
addl $5*4, %esp /* remove xen-provided frame */
CFI_ADJUST_CFA_OFFSET -5*4
jmp sysenter_past_esp
CFI_ENDPROC
ENTRY(xen_hypervisor_callback)
CFI_STARTPROC
pushl_cfi $0
SAVE_ALL
TRACE_IRQS_OFF
/* Check to see if we got the event in the critical
region in xen_iret_direct, after we've reenabled
events and checked for pending events. This simulates
iret instruction's behaviour where it delivers a
pending interrupt when enabling interrupts. */
movl PT_EIP(%esp),%eax
cmpl $xen_iret_start_crit,%eax
jb 1f
cmpl $xen_iret_end_crit,%eax
jae 1f
jmp xen_iret_crit_fixup
ENTRY(xen_do_upcall)
1: mov %esp, %eax
call xen_evtchn_do_upcall
jmp ret_from_intr
CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)
# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
# 1. Fault while reloading DS, ES, FS or GS
# 2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
CFI_STARTPROC
pushl_cfi %eax
movl $1,%eax
1: mov 4(%esp),%ds
2: mov 8(%esp),%es
3: mov 12(%esp),%fs
4: mov 16(%esp),%gs
testl %eax,%eax
popl_cfi %eax
lea 16(%esp),%esp
CFI_ADJUST_CFA_OFFSET -16
jz 5f
addl $16,%esp
jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment)
SAVE_ALL
jmp ret_from_exception
CFI_ENDPROC
.section .fixup,"ax"
6: xorl %eax,%eax
movl %eax,4(%esp)
jmp 1b
7: xorl %eax,%eax
movl %eax,8(%esp)
jmp 2b
8: xorl %eax,%eax
movl %eax,12(%esp)
jmp 3b
9: xorl %eax,%eax
movl %eax,16(%esp)
jmp 4b
.previous
_ASM_EXTABLE(1b,6b)
_ASM_EXTABLE(2b,7b)
_ASM_EXTABLE(3b,8b)
_ASM_EXTABLE(4b,9b)
ENDPROC(xen_failsafe_callback)
BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
xen_evtchn_do_upcall)
#endif /* CONFIG_XEN */
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
ret
END(mcount)
ENTRY(ftrace_caller)
cmpl $0, function_trace_stop
jne ftrace_stub
pushl %eax
pushl %ecx
pushl %edx
movl 0xc(%esp), %eax
movl 0x4(%ebp), %edx
subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call
ftrace_call:
call ftrace_stub
popl %edx
popl %ecx
popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
jmp ftrace_stub
#endif
.globl ftrace_stub
ftrace_stub:
ret
END(ftrace_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
cmpl $0, function_trace_stop
jne ftrace_stub
cmpl $ftrace_stub, ftrace_trace_function
jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpl $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller
cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
ret
/* taken from glibc */
trace:
pushl %eax
pushl %ecx
pushl %edx
movl 0xc(%esp), %eax
movl 0x4(%ebp), %edx
subl $MCOUNT_INSN_SIZE, %eax
call *ftrace_trace_function
popl %edx
popl %ecx
popl %eax
jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
cmpl $0, function_trace_stop
jne ftrace_stub
pushl %eax
pushl %ecx
pushl %edx
movl 0xc(%esp), %edx
lea 0x4(%ebp), %eax
movl (%ebp), %ecx
subl $MCOUNT_INSN_SIZE, %edx
call prepare_ftrace_return
popl %edx
popl %ecx
popl %eax
ret
END(ftrace_graph_caller)
.globl return_to_handler
return_to_handler:
pushl %eax
pushl %edx
movl %ebp, %eax
call ftrace_return_to_handler
movl %eax, %ecx
popl %edx
popl %eax
jmp *%ecx
#endif
/*
* Some functions should be protected against kprobes
*/
.pushsection .kprobes.text, "ax"
ENTRY(page_fault)
RING0_EC_FRAME
pushl_cfi $do_page_fault
ALIGN
error_code:
/* the function address is in %gs's slot on the stack */
pushl_cfi %fs
/*CFI_REL_OFFSET fs, 0*/
pushl_cfi %es
/*CFI_REL_OFFSET es, 0*/
pushl_cfi %ds
/*CFI_REL_OFFSET ds, 0*/
pushl_cfi %eax
CFI_REL_OFFSET eax, 0
pushl_cfi %ebp
CFI_REL_OFFSET ebp, 0
pushl_cfi %edi
CFI_REL_OFFSET edi, 0
pushl_cfi %esi
CFI_REL_OFFSET esi, 0
pushl_cfi %edx
CFI_REL_OFFSET edx, 0
pushl_cfi %ecx
CFI_REL_OFFSET ecx, 0
pushl_cfi %ebx
CFI_REL_OFFSET ebx, 0
cld
movl $(__KERNEL_PERCPU), %ecx
movl %ecx, %fs
UNWIND_ESPFIX_STACK
GS_TO_REG %ecx
movl PT_GS(%esp), %edi # get the function address
movl PT_ORIG_EAX(%esp), %edx # get the error code
movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
movl $(__USER_DS), %ecx
movl %ecx, %ds
movl %ecx, %es
TRACE_IRQS_OFF
movl %esp,%eax # pt_regs pointer
call *%edi
jmp ret_from_exception
CFI_ENDPROC
END(page_fault)
/*
* Debug traps and NMI can happen at the one SYSENTER instruction
* that sets up the real kernel stack. Check here, since we can't
* allow the wrong stack to be used.
*
* "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
* already pushed 3 words if it hits on the sysenter instruction:
* eflags, cs and eip.
*
* We just load the right stack, and push the three (known) values
* by hand onto the new stack - while updating the return eip past
* the instruction that would have done it for sysenter.
*/
.macro FIX_STACK offset ok label
cmpw $__KERNEL_CS, 4(%esp)
jne \ok
\label:
movl TSS_sysenter_sp0 + \offset(%esp), %esp
CFI_DEF_CFA esp, 0
CFI_UNDEFINED eip
pushfl_cfi
pushl_cfi $__KERNEL_CS
pushl_cfi $sysenter_past_esp
CFI_REL_OFFSET eip, 0
.endm
ENTRY(debug)
RING0_INT_FRAME
cmpl $ia32_sysenter_target,(%esp)
jne debug_stack_correct
FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
pushl_cfi $-1 # mark this as an int
SAVE_ALL
TRACE_IRQS_OFF
xorl %edx,%edx # error code 0
movl %esp,%eax # pt_regs pointer
call do_debug
jmp ret_from_exception
CFI_ENDPROC
END(debug)
/*
* NMI is doubly nasty. It can happen _while_ we're handling
* a debug fault, and the debug fault hasn't yet been able to
* clear up the stack. So we first check whether we got an
* NMI on the sysenter entry path, but after that we need to
* check whether we got an NMI on the debug path where the debug
* fault happened on the sysenter path.
*/
ENTRY(nmi)
RING0_INT_FRAME
pushl_cfi %eax
movl %ss, %eax
cmpw $__ESPFIX_SS, %ax
popl_cfi %eax
je nmi_espfix_stack
cmpl $ia32_sysenter_target,(%esp)
je nmi_stack_fixup
pushl_cfi %eax
movl %esp,%eax
/* Do not access memory above the end of our stack page,
* it might not exist.
*/
andl $(THREAD_SIZE-1),%eax
cmpl $(THREAD_SIZE-20),%eax
popl_cfi %eax
jae nmi_stack_correct
cmpl $ia32_sysenter_target,12(%esp)
je nmi_debug_stack_check
nmi_stack_correct:
/* We have a RING0_INT_FRAME here */
pushl_cfi %eax
SAVE_ALL
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_nmi
jmp restore_all_notrace
CFI_ENDPROC
nmi_stack_fixup:
RING0_INT_FRAME
FIX_STACK 12, nmi_stack_correct, 1
jmp nmi_stack_correct
nmi_debug_stack_check:
/* We have a RING0_INT_FRAME here */
cmpw $__KERNEL_CS,16(%esp)
jne nmi_stack_correct
cmpl $debug,(%esp)
jb nmi_stack_correct
cmpl $debug_esp_fix_insn,(%esp)
ja nmi_stack_correct
FIX_STACK 24, nmi_stack_correct, 1
jmp nmi_stack_correct
nmi_espfix_stack:
/* We have a RING0_INT_FRAME here.
*
* create the pointer to lss back
*/
pushl_cfi %ss
pushl_cfi %esp
addl $4, (%esp)
/* copy the iret frame of 12 bytes */
.rept 3
pushl_cfi 16(%esp)
.endr
pushl_cfi %eax
SAVE_ALL
FIXUP_ESPFIX_STACK # %eax == %esp
xorl %edx,%edx # zero error code
call do_nmi
RESTORE_REGS
lss 12+4(%esp), %esp # back to espfix stack
CFI_ADJUST_CFA_OFFSET -24
jmp irq_return
CFI_ENDPROC
END(nmi)
ENTRY(int3)
RING0_INT_FRAME
pushl_cfi $-1 # mark this as an int
SAVE_ALL
TRACE_IRQS_OFF
xorl %edx,%edx # zero error code
movl %esp,%eax # pt_regs pointer
call do_int3
jmp ret_from_exception
CFI_ENDPROC
END(int3)
ENTRY(general_protection)
RING0_EC_FRAME
pushl_cfi $do_general_protection
jmp error_code
CFI_ENDPROC
END(general_protection)
#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
RING0_EC_FRAME
pushl_cfi $do_async_page_fault
jmp error_code
CFI_ENDPROC
END(async_page_fault)
#endif
/*
* End of kprobes section
*/
.popsection