/*
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 * ptrace needs to have all regs on the stack.
 * if the order here is changed, it needs to be
 * updated in fork.c:copy_process, signal.c:do_signal,
 * ptrace.c and ptrace.h
 *
 *  0(%esp) - %ebx
 *  4(%esp) - %ecx
 *  8(%esp) - %edx
 *  C(%esp) - %esi
 * 10(%esp) - %edi
 * 14(%esp) - %ebp
 * 18(%esp) - %eax
 * 1C(%esp) - %ds
 * 20(%esp) - %es
 * 24(%esp) - %fs
 * 28(%esp) - %gs      saved iff !CONFIG_X86_32_LAZY_GS
 * 2C(%esp) - orig_eax
 * 30(%esp) - %eip
 * 34(%esp) - %cs
 * 38(%esp) - %eflags
 * 3C(%esp) - %oldesp
 * 40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386         (EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE         0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit  syscall_trace_entry
#define sysexit_audit   syscall_exit_work
#endif

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)
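
/*
 * syscall_table_size is computed at the bottom of this file, right after
 * syscall_table_32.S is included: each table entry is a .long, so dividing
 * the table's byte size by 4 yields the number of system calls.
 */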

#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)  DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel           restore_all
#endif
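
/*
 * TRACE_IRQS_IRET keeps the irqflags tracer honest across an iret: if the
 * EFLAGS image we are about to restore has IF set, the iret will re-enable
 * interrupts, so report them as on to the tracer before returning.
 */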
.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
        testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)    # interrupts off?
        jz 1f
        TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig    check_userspace
#else
#define resume_userspace_sig    resume_userspace
#endif

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel only uses it for the stack
 * canary, which gcc requires to live at %gs:20. Read the comment at the
 * top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
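/*
 * The 98/99 pairs below work with the exception table: each __ex_table
 * entry maps the address of a potentially-faulting instruction (98) to a
 * fixup address (99). If loading a stale user %gs selector faults, the
 * fixup zeroes the saved value and retries instead of oopsing.
 */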
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
.endm
.macro POP_GS pop=0
        addl $(4 + \pop), %esp
        CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else   /* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
        pushl %gs
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:     popl %gs
        CFI_ADJUST_CFA_OFFSET -4
        /*CFI_RESTORE gs*/
  .if \pop <> 0
        add $\pop, %esp
        CFI_ADJUST_CFA_OFFSET -\pop
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:     movl $0, (%esp)
        jmp 98b
.section __ex_table, "a"
        .align 4
        .long 98b, 99b
.popsection
.endm

.macro PTGS_TO_GS
98:     mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:     movl $0, PT_GS(%esp)
        jmp 98b
.section __ex_table, "a"
        .align 4
        .long 98b, 99b
.popsection
.endm

.macro GS_TO_REG reg
        movl %gs, \reg
        /*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
        movl \reg, PT_GS(%esp)
        /*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
        movl $(__KERNEL_STACK_CANARY), \reg
        movl \reg, %gs
.endm

#endif  /* CONFIG_X86_32_LAZY_GS */
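
/*
 * SAVE_ALL completes the pt_regs layout shown at the top of this file:
 * the CPU has already pushed eflags/cs/eip (plus ss/esp on a CPL change),
 * the entry stub pushed orig_eax, and this macro pushes the segment and
 * general registers, then loads the kernel's data, per-cpu and canary
 * segments.
 */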
.macro SAVE_ALL
        cld
        PUSH_GS
        pushl %fs
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET fs, 0;*/
        pushl %es
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET es, 0;*/
        pushl %ds
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ds, 0;*/
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eax, 0
        pushl %ebp
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebp, 0
        pushl %edi
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edi, 0
        pushl %esi
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET esi, 0
        pushl %edx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edx, 0
        pushl %ecx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ecx, 0
        pushl %ebx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebx, 0
        movl $(__USER_DS), %edx
        movl %edx, %ds
        movl %edx, %es
        movl $(__KERNEL_PERCPU), %edx
        movl %edx, %fs
        SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
        popl %ebx
        CFI_ADJUST_CFA_OFFSET -4
        CFI_RESTORE ebx
        popl %ecx
        CFI_ADJUST_CFA_OFFSET -4
        CFI_RESTORE ecx
        popl %edx
        CFI_ADJUST_CFA_OFFSET -4
        CFI_RESTORE edx
        popl %esi
        CFI_ADJUST_CFA_OFFSET -4
        CFI_RESTORE esi
        popl %edi
        CFI_ADJUST_CFA_OFFSET -4
        CFI_RESTORE edi
        popl %ebp
        CFI_ADJUST_CFA_OFFSET -4
        CFI_RESTORE ebp
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
        RESTORE_INT_REGS
1:      popl %ds
        CFI_ADJUST_CFA_OFFSET -4
        /*CFI_RESTORE ds;*/
2:      popl %es
        CFI_ADJUST_CFA_OFFSET -4
        /*CFI_RESTORE es;*/
3:      popl %fs
        CFI_ADJUST_CFA_OFFSET -4
        /*CFI_RESTORE fs;*/
        POP_GS \pop
.pushsection .fixup, "ax"
4:      movl $0, (%esp)
        jmp 1b
5:      movl $0, (%esp)
        jmp 2b
6:      movl $0, (%esp)
        jmp 3b
.section __ex_table, "a"
        .align 4
        .long 1b, 4b
        .long 2b, 5b
        .long 3b, 6b
.popsection
        POP_GS_EX
.endm
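
/*
 * The three RING0_*_FRAME macros below only emit DWARF annotations; they
 * describe the CFA for the three entry shapes: a plain trap frame (3
 * words: eip/cs/eflags), a trap frame with a hardware error code (4
 * words), and a full pt_regs frame.
 */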
.macro RING0_INT_FRAME
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA esp, 3*4
        /*CFI_OFFSET cs, -2*4;*/
        CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA esp, 4*4
        /*CFI_OFFSET cs, -2*4;*/
        CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
        /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
        CFI_OFFSET eip, PT_EIP-PT_OLDESP
        /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
        /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
        CFI_OFFSET eax, PT_EAX-PT_OLDESP
        CFI_OFFSET ebp, PT_EBP-PT_OLDESP
        CFI_OFFSET edi, PT_EDI-PT_OLDESP
        CFI_OFFSET esi, PT_ESI-PT_OLDESP
        CFI_OFFSET edx, PT_EDX-PT_OLDESP
        CFI_OFFSET ecx, PT_ECX-PT_OLDESP
        CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

ENTRY(ret_from_fork)
        CFI_STARTPROC
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        call schedule_tail
        GET_THREAD_INFO(%ebp)
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        pushl $0x0202                   # Reset kernel eflags
        CFI_ADJUST_CFA_OFFSET 4
        popfl
        CFI_ADJUST_CFA_OFFSET -4
        jmp syscall_exit
        CFI_ENDPROC
END(ret_from_fork)

/*
 * Interrupt exit functions should be protected against kprobes
 */
        .pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

        # userspace resumption stub bypassing syscall exit tracing
        ALIGN
        RING0_PTREGS_FRAME
ret_from_exception:
        preempt_stop(CLBR_ANY)
ret_from_intr:
        GET_THREAD_INFO(%ebp)
check_userspace:
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
        cmpl $USER_RPL, %eax
        jb resume_kernel                # not returning to v8086 or userspace

ENTRY(resume_userspace)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
                                        # int/exception return?
        jne work_pending
        jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
        DISABLE_INTERRUPTS(CLBR_ANY)
        cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
        jnz restore_all
need_resched:
        movl TI_flags(%ebp), %ecx       # need_resched set ?
        testb $_TIF_NEED_RESCHED, %cl
        jz restore_all
        testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)    # interrupts off (exception path) ?
        jz restore_all
        call preempt_schedule_irq
        jmp need_resched
END(resume_kernel)
#endif
        CFI_ENDPROC
/*
 * End of kprobes section
 */
        .popsection

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */

        # sysenter call handler stub
ENTRY(ia32_sysenter_target)
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA esp, 0
        CFI_REGISTER esp, ebp
        movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
        /*
         * Interrupts are disabled here, but we can't trace that until we
         * have enough kernel state set up to call TRACE_IRQS_OFF - and we
         * immediately enable interrupts at that point anyway.
         */
        pushl $(__USER_DS)
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ss, 0*/
        pushl %ebp
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET esp, 0
        pushfl
        orl $X86_EFLAGS_IF, (%esp)
        CFI_ADJUST_CFA_OFFSET 4
        pushl $(__USER_CS)
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET cs, 0*/
        /*
         * Push current_thread_info()->sysenter_return to the stack.
         * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
         * pushed above; +8 corresponds to copy_thread's esp0 setting.
         */
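        /*
         * Concretely: %esp here sits 4*4 bytes below the task's sp0, sp0
         * itself is 8 bytes below the top of the kernel stack, and
         * thread_info lives at the bottom, THREAD_SIZE below the top --
         * hence the three correction terms in the operand below.
         */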
        pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eip, 0

        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
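/*
 * %ebp holds the user's stack pointer here; the 4-byte load below must lie
 * entirely below __PAGE_OFFSET, so the highest acceptable address is
 * __PAGE_OFFSET-4. Anything at or above __PAGE_OFFSET-3 would reach into
 * kernel space and is routed to syscall_fault instead.
 */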
        cmpl $__PAGE_OFFSET-3,%ebp
        jae syscall_fault
1:      movl (%ebp),%ebp
        movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
        .align 4
        .long 1b,syscall_fault
.previous

        GET_THREAD_INFO(%ebp)

        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
        jnz sysenter_audit
sysenter_do_call:
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testl $_TIF_ALLWORK_MASK, %ecx
        jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
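/* sysexit resumes userspace with %edx -> %eip and %ecx -> %esp, which is
   why the two loads below come from PT_EIP and PT_OLDESP. */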
        movl PT_EIP(%esp), %edx
        movl PT_OLDESP(%esp), %ecx
        xorl %ebp,%ebp
        TRACE_IRQS_ON
1:      mov PT_FS(%esp), %fs
        PTGS_TO_GS
        ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        addl $4,%esp
        CFI_ADJUST_CFA_OFFSET -4
        /* %esi already in 8(%esp)      6th arg: 4th syscall arg */
        /* %edx already in 4(%esp)      5th arg: 3rd syscall arg */
        /* %ecx already in 0(%esp)      4th arg: 2nd syscall arg */
        movl %ebx,%ecx                  /* 3rd arg: 1st syscall arg */
        movl %eax,%edx                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_I386,%eax      /* 1st arg: audit arch */
        call audit_syscall_entry
        pushl %ebx
        CFI_ADJUST_CFA_OFFSET 4
        movl PT_EAX(%esp),%eax          /* reload syscall number */
        jmp sysenter_do_call

sysexit_audit:
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
        jne syscall_exit_work
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
        movl %eax,%edx                  /* second arg, syscall return value */
        cmpl $0,%eax                    /* is it < 0? */
        setl %al                        /* 1 if so, 0 if not */
        movzbl %al,%eax                 /* zero-extend that */
        inc %eax                        /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
        call audit_syscall_exit
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
        jne syscall_exit_work
        movl PT_EAX(%esp),%eax          /* reload syscall return value */
        jmp sysenter_exit
#endif

        CFI_ENDPROC
.pushsection .fixup,"ax"
2:      movl $0,PT_FS(%esp)
        jmp 1b
.section __ex_table,"a"
        .align 4
        .long 1b,2b
.popsection
        PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

/*
 * syscall stub including irq exit should be protected against kprobes
 */
        .pushsection .kprobes.text, "ax"
        # system call handler stub
ENTRY(system_call)
        RING0_INT_FRAME                 # can't unwind into user space anyway
        pushl %eax                      # save orig_eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
                                        # system call tracing in operation / emulation
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
syscall_call:
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)          # store the return value
syscall_exit:
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        testl $_TIF_ALLWORK_MASK, %ecx  # current->work
        jne syscall_exit_work

restore_all:
        TRACE_IRQS_IRET
restore_all_notrace:
        movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
        # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
        # are returning to the kernel.
        # See comments in process.c:copy_thread() for details.
        movb PT_OLDSS(%esp), %ah
        movb PT_CS(%esp), %al
        andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
        cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
        CFI_REMEMBER_STATE
        je ldt_ss                       # returning to user-space with LDT SS
restore_nocheck:
        RESTORE_REGS 4                  # skip orig_eax/error_code
        CFI_ADJUST_CFA_OFFSET -4
irq_return:
        INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
        pushl $0                        # no error code
        pushl $do_iret_error
        jmp error_code
.previous
.section __ex_table,"a"
        .align 4
        .long irq_return,iret_exc
.previous

        CFI_RESTORE_STATE
ldt_ss:
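        /*
         * lar loads the access rights of the saved SS selector and sets
         * ZF on success, so "jnz" below skips the ESPFIX path whenever
         * the segment is invalid. Bit 22 of the result is the D/B bit:
         * if it is set the stack segment is 32-bit and the plain iret
         * path is fine.
         */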
        larl PT_OLDSS(%esp), %eax
        jnz restore_nocheck
        testl $0x00400000, %eax         # returning to 32bit stack?
        jnz restore_nocheck             # all right, normal return

#ifdef CONFIG_PARAVIRT
        /*
         * The kernel can't run on a non-flat stack if paravirt mode
         * is active. Rather than try to fix up the high bits of
         * ESP, bypass this code entirely. This may break DOSemu
         * and/or Wine support in a paravirt VM, although the option
         * is still available to implement the setting of the high
         * 16 bits in the INTERRUPT_RETURN paravirt-op.
         */
        cmpl $0, pv_info+PARAVIRT_enabled
        jne restore_nocheck
#endif

/*
 * Set up and switch to the ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment, whose
 * base makes up for the difference.
 */
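/*
 * Worked example with made-up values: kernel %esp = 0xc1234f00, saved
 * user %esp = 0xbf80ff00. "mov %dx, %ax" forms the new %esp 0xbf804f00,
 * and %edx becomes 0xc1234f00 - 0xbf804f00 = 0x01a30000, whose high word
 * is written into the ESPFIX descriptor base. After "lss", base + %esp =
 * 0x01a30000 + 0xbf804f00 = 0xc1234f00: the same linear stack, but the
 * high word of %esp now matches userspace, so iret's 16-bit ESP restore
 * leaks nothing foreign.
 */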
        mov %esp, %edx                  /* load kernel esp */
        mov PT_OLDESP(%esp), %eax       /* load userspace esp */
        mov %dx, %ax                    /* eax: new kernel esp */
        sub %eax, %edx                  /* offset (low word is 0) */
        PER_CPU(gdt_page, %ebx)
        shr $16, %edx
        mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
        mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
        pushl $__ESPFIX_SS
        CFI_ADJUST_CFA_OFFSET 4
        push %eax                       /* new kernel esp */
        CFI_ADJUST_CFA_OFFSET 4
        /*
         * Disable interrupts, but do not irqtrace this section: we
         * will soon execute iret and the tracer state was already set
         * up for the post-iret irq state.
         */
        DISABLE_INTERRUPTS(CLBR_EAX)
        lss (%esp), %esp                /* switch to espfix segment */
        CFI_ADJUST_CFA_OFFSET -8
        jmp restore_nocheck
        CFI_ENDPROC
ENDPROC(system_call)

        # perform work that needs to be done immediately before resumption
        ALIGN
        RING0_PTREGS_FRAME              # can't unwind into user space anyway
work_pending:
        testb $_TIF_NEED_RESCHED, %cl
        jz work_notifysig
work_resched:
        call schedule
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
                                        # setting need_resched or sigpending
                                        # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
        andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
                                        # than syscall tracing?
        jz restore_all
        testb $_TIF_NEED_RESCHED, %cl
        jnz work_resched

work_notifysig:                         # deal with pending signals and
                                        # notify-resume requests
#ifdef CONFIG_VM86
        testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
        movl %esp, %eax
        jne work_notifysig_v86          # returning to kernel-space or
                                        # vm86-space
        xorl %edx, %edx
        call do_notify_resume
        jmp resume_userspace_sig

        ALIGN
work_notifysig_v86:
        pushl %ecx                      # save ti_flags for do_notify_resume
        CFI_ADJUST_CFA_OFFSET 4
        call save_v86_state             # %eax contains pt_regs pointer
        popl %ecx
        CFI_ADJUST_CFA_OFFSET -4
        movl %eax, %esp
#else
        movl %esp, %eax
#endif
        xorl %edx, %edx
        call do_notify_resume
        jmp resume_userspace_sig
END(work_pending)

        # perform syscall entry tracing
        ALIGN
syscall_trace_entry:
        movl $-ENOSYS,PT_EAX(%esp)
        movl %esp, %eax
        call syscall_trace_enter
        /* What it returned is what we'll actually use. */
        cmpl $(nr_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit
END(syscall_trace_entry)

        # perform syscall exit tracing
        ALIGN
syscall_exit_work:
        testl $_TIF_WORK_SYSCALL_EXIT, %ecx
        jz work_pending
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)     # could let syscall_trace_leave() call
                                        # schedule() instead
        movl %esp, %eax
        call syscall_trace_leave
        jmp resume_userspace
END(syscall_exit_work)
        CFI_ENDPROC

        RING0_INT_FRAME                 # can't unwind into user space anyway
syscall_fault:
        GET_THREAD_INFO(%ebp)
        movl $-EFAULT,PT_EAX(%esp)
        jmp resume_userspace
END(syscall_fault)

syscall_badsys:
        movl $-ENOSYS,PT_EAX(%esp)
        jmp resume_userspace
END(syscall_badsys)
        CFI_ENDPROC
/*
 * End of kprobes section
 */
        .popsection

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL(name) \
        ALIGN; \
ptregs_##name: \
        leal 4(%esp),%eax; \
        jmp sys_##name;

PTREGSCALL(iopl)
PTREGSCALL(fork)
PTREGSCALL(clone)
PTREGSCALL(vfork)
PTREGSCALL(execve)
PTREGSCALL(sigaltstack)
PTREGSCALL(sigreturn)
PTREGSCALL(rt_sigreturn)
PTREGSCALL(vm86)
PTREGSCALL(vm86old)
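
/*
 * The 32-bit kernel is built with -mregparm=3, so the first C argument
 * travels in %eax. Each stub points %eax just past its own return
 * address -- i.e. at the saved pt_regs -- and tail-jumps to the real
 * sys_* implementation.
 */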

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP with the matching offset.
 */
        /* fixup the stack */
        PER_CPU(gdt_page, %ebx)
        mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
        mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
        shl $16, %eax
        addl %esp, %eax                 /* the adjusted stack pointer */
        pushl $__KERNEL_DS
        CFI_ADJUST_CFA_OFFSET 4
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        lss (%esp), %esp                /* switch to the normal stack segment */
        CFI_ADJUST_CFA_OFFSET -8
.endm

.macro UNWIND_ESPFIX_STACK
        movl %ss, %eax
        /* see if on espfix stack */
        cmpw $__ESPFIX_SS, %ax
        jne 27f
        movl $__KERNEL_DS, %eax
        movl %eax, %ds
        movl %eax, %es
        /* switch to normal stack */
        FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
.section .init.rodata,"a"
ENTRY(interrupt)
.text
        .p2align 5
        .p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
        RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
        .balign 32
  .rept 7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
        CFI_ADJUST_CFA_OFFSET -4
      .endif
1:      pushl $(~vector+0x80)           /* Note: always in signed byte range */
        CFI_ADJUST_CFA_OFFSET 4
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
        jmp 2f
      .endif
      .previous
        .long 1b
      .text
vector=vector+1
    .endif
  .endr
2:      jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
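
/*
 * Seven stubs fit in 32 bytes because each one is tiny: "pushl $imm8" is
 * two bytes (the ~vector+0x80 trick keeps the operand in signed byte
 * range) and the short "jmp 2f" two more, with only the last stub in a
 * chunk carrying the longer jump to common_interrupt.
 */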

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
        addl $-0x80,(%esp)              /* Adjust vector into the [-256,-1] range */
        SAVE_ALL
        TRACE_IRQS_OFF
        movl %esp,%eax
        call do_IRQ
        jmp ret_from_intr
ENDPROC(common_interrupt)
        CFI_ENDPROC
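
/*
 * After the addl above, the value saved in orig_eax is ~vector, a number
 * in [-256, -1]. Keeping interrupt vectors negative leaves the positive
 * orig_eax values free to mean "system call number", and do_IRQ recovers
 * the vector by inverting the saved value.
 */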

/*
 * Irq entries should be protected against kprobes
 */
        .pushsection .kprobes.text, "ax"

#define BUILD_INTERRUPT3(name, nr, fn)  \
ENTRY(name)                             \
        RING0_INT_FRAME;                \
        pushl $~(nr);                   \
        CFI_ADJUST_CFA_OFFSET 4;        \
        SAVE_ALL;                       \
        TRACE_IRQS_OFF                  \
        movl %esp,%eax;                 \
        call fn;                        \
        jmp ret_from_intr;              \
        CFI_ENDPROC;                    \
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)       BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_coprocessor_error
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_simd_coprocessor_error
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
        RING0_INT_FRAME
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_device_not_available
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
        iret
.section __ex_table,"a"
        .align 4
        .long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
        sti
        sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_overflow
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(overflow)

ENTRY(bounds)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_bounds
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_invalid_op
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_coprocessor_segment_overrun
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
        RING0_EC_FRAME
        pushl $do_invalid_TSS
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
        RING0_EC_FRAME
        pushl $do_segment_not_present
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
        RING0_EC_FRAME
        pushl $do_stack_segment
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
        RING0_EC_FRAME
        pushl $do_alignment_check
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
        RING0_INT_FRAME
        pushl $0                        # no error code
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_divide_error
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl machine_check_vector
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
        RING0_INT_FRAME
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        pushl $do_spurious_interrupt_bug
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(spurious_interrupt_bug)
/*
 * End of kprobes section
 */
        .popsection

ENTRY(kernel_thread_helper)
        pushl $0                        # fake return address for unwinder
        CFI_STARTPROC
        movl %edx,%eax
        push %edx
        CFI_ADJUST_CFA_OFFSET 4
        call *%ebx
        push %eax
        CFI_ADJUST_CFA_OFFSET 4
        call do_exit
        ud2                             # padding for call trace
        CFI_ENDPROC
ENDPROC(kernel_thread_helper)
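
/*
 * kernel_thread() arranges for a new thread to start here with %ebx
 * holding the thread function and %edx its argument. The argument is
 * passed both in %eax (for regparm functions) and on the stack, and the
 * function's return value becomes the exit code handed to do_exit().
 */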

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entrypoint expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
        RING0_INT_FRAME
        addl $5*4, %esp                 /* remove xen-provided frame */
        CFI_ADJUST_CFA_OFFSET -5*4
        jmp sysenter_past_esp
        CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
        CFI_STARTPROC
        pushl $0
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        TRACE_IRQS_OFF
        /* Check to see if we got the event in the critical
           region in xen_iret_direct, after we've re-enabled
           events and checked for pending events. This simulates
           the iret instruction's behaviour, where it delivers a
           pending interrupt when enabling interrupts. */
        movl PT_EIP(%esp),%eax
        cmpl $xen_iret_start_crit,%eax
        jb 1f
        cmpl $xen_iret_end_crit,%eax
        jae 1f

        jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:      mov %esp, %eax
        call xen_evtchn_do_upcall
        jmp ret_from_intr
        CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
        CFI_STARTPROC
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        movl $1,%eax
1:      mov 4(%esp),%ds
2:      mov 8(%esp),%es
3:      mov 12(%esp),%fs
4:      mov 16(%esp),%gs
        testl %eax,%eax
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        lea 16(%esp),%esp
        CFI_ADJUST_CFA_OFFSET -16
        jz 5f
        addl $16,%esp
        jmp iret_exc                    # EAX != 0 => Category 2 (Bad IRET)
5:      pushl $0                        # EAX == 0 => Category 1 (Bad segment)
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        jmp ret_from_exception
        CFI_ENDPROC

.section .fixup,"ax"
6:      xorl %eax,%eax
        movl %eax,4(%esp)
        jmp 1b
7:      xorl %eax,%eax
        movl %eax,8(%esp)
        jmp 2b
8:      xorl %eax,%eax
        movl %eax,12(%esp)
        jmp 3b
9:      xorl %eax,%eax
        movl %eax,16(%esp)
        jmp 4b
.previous
.section __ex_table,"a"
        .align 4
        .long 1b,6b
        .long 2b,7b
        .long 3b,8b
        .long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)
#endif  /* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
        ret
END(mcount)

ENTRY(ftrace_caller)
        cmpl $0, function_trace_stop
        jne ftrace_stub

        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %eax
        movl 0x4(%ebp), %edx
        subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
        call ftrace_stub

        popl %edx
        popl %ecx
        popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
        jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
        ret
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
        cmpl $0, function_trace_stop
        jne ftrace_stub

        cmpl $ftrace_stub, ftrace_trace_function
        jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpl $ftrace_stub, ftrace_graph_return
        jnz ftrace_graph_caller

        cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
        jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
        ret

        /* taken from glibc */
trace:
        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %eax
        movl 0x4(%ebp), %edx
        subl $MCOUNT_INSN_SIZE, %eax

        call *ftrace_trace_function

        popl %edx
        popl %ecx
        popl %eax
        jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        cmpl $0, function_trace_stop
        jne ftrace_stub

        pushl %eax
        pushl %ecx
        pushl %edx
        movl 0xc(%esp), %edx
        lea 0x4(%ebp), %eax
        movl (%ebp), %ecx
        subl $MCOUNT_INSN_SIZE, %edx
        call prepare_ftrace_return
        popl %edx
        popl %ecx
        popl %eax
        ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
        pushl %eax
        pushl %edx
        movl %ebp, %eax
        call ftrace_return_to_handler
        movl %eax, %ecx
        popl %edx
        popl %eax
        jmp *%ecx
#endif

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)

/*
 * Some functions should be protected against kprobes
 */
        .pushsection .kprobes.text, "ax"

ENTRY(page_fault)
        RING0_EC_FRAME
        pushl $do_page_fault
        CFI_ADJUST_CFA_OFFSET 4
        ALIGN
error_code:
        /* the function address is in %gs's slot on the stack */
        pushl %fs
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET fs, 0*/
        pushl %es
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET es, 0*/
        pushl %ds
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET ds, 0*/
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eax, 0
        pushl %ebp
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebp, 0
        pushl %edi
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edi, 0
        pushl %esi
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET esi, 0
        pushl %edx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET edx, 0
        pushl %ecx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ecx, 0
        pushl %ebx
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebx, 0
        cld
        movl $(__KERNEL_PERCPU), %ecx
        movl %ecx, %fs
        UNWIND_ESPFIX_STACK
        GS_TO_REG %ecx
        movl PT_GS(%esp), %edi          # get the function address
        movl PT_ORIG_EAX(%esp), %edx    # get the error code
        movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
        REG_TO_PTGS %ecx
        SET_KERNEL_GS %ecx
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
        TRACE_IRQS_OFF
        movl %esp,%eax                  # pt_regs pointer
        call *%edi
        jmp ret_from_exception
        CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
        cmpw $__KERNEL_CS, 4(%esp)
        jne \ok
\label:
        movl TSS_sysenter_sp0 + \offset(%esp), %esp
        CFI_DEF_CFA esp, 0
        CFI_UNDEFINED eip
        pushfl
        CFI_ADJUST_CFA_OFFSET 4
        pushl $__KERNEL_CS
        CFI_ADJUST_CFA_OFFSET 4
        pushl $sysenter_past_esp
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eip, 0
.endm
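
/*
 * FIX_STACK's offset argument is 12 when the trap hit on the sysenter
 * instruction itself (the handler pushed eflags, cs, eip = 12 bytes) and
 * 24 on the NMI-over-debug path, where two such 3-word frames appear to
 * be stacked (debug's three words plus the NMI's three).
 */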

ENTRY(debug)
        RING0_INT_FRAME
        cmpl $ia32_sysenter_target,(%esp)
        jne debug_stack_correct
        FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx,%edx                  # error code 0
        movl %esp,%eax                  # pt_regs pointer
        call do_debug
        jmp ret_from_exception
        CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
        RING0_INT_FRAME
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        movl %ss, %eax
        cmpw $__ESPFIX_SS, %ax
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        je nmi_espfix_stack
        cmpl $ia32_sysenter_target,(%esp)
        je nmi_stack_fixup
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        movl %esp,%eax
        /* Do not access memory above the end of our stack page,
         * it might not exist.
         */
        andl $(THREAD_SIZE-1),%eax
        cmpl $(THREAD_SIZE-20),%eax
        popl %eax
        CFI_ADJUST_CFA_OFFSET -4
        jae nmi_stack_correct
        cmpl $ia32_sysenter_target,12(%esp)
        je nmi_debug_stack_check
nmi_stack_correct:
        /* We have a RING0_INT_FRAME here */
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        xorl %edx,%edx                  # zero error code
        movl %esp,%eax                  # pt_regs pointer
        call do_nmi
        jmp restore_all_notrace
        CFI_ENDPROC

nmi_stack_fixup:
        RING0_INT_FRAME
        FIX_STACK 12, nmi_stack_correct, 1
        jmp nmi_stack_correct

nmi_debug_stack_check:
        /* We have a RING0_INT_FRAME here */
        cmpw $__KERNEL_CS,16(%esp)
        jne nmi_stack_correct
        cmpl $debug,(%esp)
        jb nmi_stack_correct
        cmpl $debug_esp_fix_insn,(%esp)
        ja nmi_stack_correct
        FIX_STACK 24, nmi_stack_correct, 1
        jmp nmi_stack_correct

nmi_espfix_stack:
        /* We have a RING0_INT_FRAME here.
         *
         * Construct an ss:esp pair on the stack so we can lss back to
         * the espfix stack once do_nmi is done.
         */
        pushl %ss
        CFI_ADJUST_CFA_OFFSET 4
        pushl %esp
        CFI_ADJUST_CFA_OFFSET 4
        addl $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
        pushl 16(%esp)
        CFI_ADJUST_CFA_OFFSET 4
        .endr
        pushl %eax
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        FIXUP_ESPFIX_STACK              # %eax == %esp
        xorl %edx,%edx                  # zero error code
        call do_nmi
        RESTORE_REGS
        lss 12+4(%esp), %esp            # back to espfix stack
        CFI_ADJUST_CFA_OFFSET -24
        jmp irq_return
        CFI_ENDPROC
END(nmi)

ENTRY(int3)
        RING0_INT_FRAME
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
        SAVE_ALL
        TRACE_IRQS_OFF
        xorl %edx,%edx                  # zero error code
        movl %esp,%eax                  # pt_regs pointer
        call do_int3
        jmp ret_from_exception
        CFI_ENDPROC
END(int3)

ENTRY(general_protection)
        RING0_EC_FRAME
        pushl $do_general_protection
        CFI_ADJUST_CFA_OFFSET 4
        jmp error_code
        CFI_ENDPROC
END(general_protection)
/*
 * End of kprobes section
 */
        .popsection