entry_32.S
/*
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
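/*
 * For reference (not part of the original file): the offsets above mirror
 * struct pt_regs for 32-bit x86. A sketch of the corresponding C layout,
 * assuming the 2.6.3x-era definition in <asm/ptrace.h>:
 *
 *	struct pt_regs {
 *		unsigned long bx, cx, dx, si, di, bp, ax;
 *		unsigned long ds, es, fs, gs;
 *		unsigned long orig_ax;
 *		unsigned long ip, cs, flags, sp, ss;
 *	};
 *
 * The PT_EBX, PT_ECX, ... constants used below are the byte offsets of
 * these fields, generated by asm-offsets.c.
 */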
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit		syscall_trace_entry
#define sysexit_audit		syscall_exit_work
#endif

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN (aka. "iret")
 *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#define nr_syscalls ((syscall_table_size)/4)
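/*
 * Note (not part of the original file): sys_call_table, included from
 * syscall_table_32.S near the end of this file, is an array of .long
 * (4-byte) function pointers, and syscall_table_size is its total size
 * in bytes; dividing by 4 therefore yields the number of entries.
 */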
#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel uses it only for the stack
 * canary, which gcc requires to be at %gs:20. Read the comment at the
 * top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
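/*
 * For reference (not part of the original file): with
 * CONFIG_CC_STACKPROTECTOR, gcc emits canary checks of roughly this
 * shape in every protected function, which is why %gs must point at a
 * segment whose offset 20 holds the canary:
 *
 *	movl %gs:20, %eax	# load canary into the stack frame
 *	...
 *	xorl %gs:20, %eax	# compare on function exit
 *	jne __stack_chk_fail
 */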
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-ops */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl %gs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl %gs
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE gs*/
  .if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */

.macro SAVE_ALL
	cld
	PUSH_GS
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0;*/
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0;*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0;*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl %ebx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebx
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ecx
	popl %edx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edx
	popl %esi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE esi
	popl %edi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edi
	popl %ebp
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebp
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl %ds
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE ds;*/
2:	popl %es
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE es;*/
3:	popl %fs
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
	POP_GS_EX
.endm

.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm
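/*
 * Note (not part of the original file): the three RING0_*_FRAME macros
 * only emit DWARF CFI annotations for the unwinder; they generate no
 * instructions. INT assumes the CPU pushed eip/cs/eflags (3 words), EC
 * assumes an additional error code (4 words), and PTREGS assumes a full
 * pt_regs frame built by SAVE_ALL.
 */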
ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Interrupt exit functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until we
	 * have set up enough kernel state to call TRACE_IRQS_OFF - and
	 * we immediately enable interrupts at that point anyway.
	 */
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	orl $X86_EFLAGS_IF, (%esp)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
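	/*
	 * A worked reading of the offset below (not part of the original
	 * file, assuming copy_thread sets sp0 two words below the top of
	 * the thread stack): at this point %esp = sp0 - 4*4, so adding
	 * 8 + 4*4 recovers the top of the THREAD_SIZE stack region, and
	 * TI_sysenter_return - THREAD_SIZE then reaches down into the
	 * thread_info at the bottom of that same region.
	 */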
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * Load the potential sixth argument from user stack.
	 * Careful about security.
	 */
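	/*
	 * Note (not part of the original file): %ebp holds the user stack
	 * pointer, and the 4-byte read below must lie entirely below
	 * __PAGE_OFFSET. The last valid start address is __PAGE_OFFSET-4,
	 * hence "jae" against __PAGE_OFFSET-3; the __ex_table entry
	 * catches addresses that are in range but unmapped.
	 */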
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous

	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
sysenter_exit:
/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	   6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	   5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	   4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $0,%eax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	inc %eax		/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

/*
 * syscall stub including irq exit should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"
	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.
 */
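/*
 * A worked example of the arithmetic below (not part of the original
 * file; the addresses are purely illustrative): suppose the kernel %esp
 * is 0xc1802f00 and the userspace %esp is 0x00041f00. "mov %dx, %ax"
 * builds the new %esp 0x00042f00 (user high word, kernel low word),
 * and %edx - %eax = 0xc17c0000, an offset whose low word is zero. That
 * offset becomes the ESPFIX segment base, so base + new %esp equals the
 * real kernel %esp, and iret's failure to restore the high word of
 * %esp no longer matters.
 */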
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	PER_CPU(gdt_page, %ebx)
	shr $16, %edx
	mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
	mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	push %eax			/* new kernel esp */
	CFI_ADJUST_CFA_OFFSET 4
	/* Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC
/*
 * End of kprobes section
 */
	.popsection

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL0(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL1(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL2(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%ecx; \
	movl (PT_ECX+4)(%esp),%edx; \
	movl (PT_EBX+4)(%esp),%eax; \
	jmp sys_##name;

#define PTREGSCALL3(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%eax; \
	pushl %eax; \
	movl PT_EDX(%eax),%ecx; \
	movl PT_ECX(%eax),%edx; \
	movl PT_EBX(%eax),%eax; \
	call sys_##name; \
	addl $4,%esp; \
	ret
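/*
 * An illustrative expansion (not part of the original file, assuming the
 * kernel's -mregparm=3 convention, where %eax/%edx/%ecx carry the first
 * three C arguments): PTREGSCALL1(iopl) becomes a stub that passes the
 * first syscall argument plus a pointer to the pt_regs frame:
 *
 *	ptregs_iopl:
 *		leal 4(%esp),%edx		# 2nd arg: pt_regs pointer
 *		movl (PT_EBX+4)(%esp),%eax	# 1st arg: saved user %ebx
 *		jmp sys_iopl
 *
 * The +4 skips the return address pushed by the "call *sys_call_table"
 * that entered the stub, so 4(%esp) is the start of pt_regs.
 */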
PTREGSCALL1(iopl)
PTREGSCALL0(fork)
PTREGSCALL0(vfork)
PTREGSCALL3(execve)
PTREGSCALL2(sigaltstack)
PTREGSCALL0(sigreturn)
PTREGSCALL0(rt_sigreturn)
PTREGSCALL2(vm86)
PTREGSCALL1(vm86old)

/* Clone is an oddball. The 4th arg is in %edi */
	ALIGN;
ptregs_clone:
	leal 4(%esp),%eax
	pushl %eax
	pushl PT_EDI(%eax)
	movl PT_EDX(%eax),%ecx
	movl PT_ECX(%eax),%edx
	movl PT_EBX(%eax),%eax
	call sys_clone
	addl $8,%esp
	ret

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT, switches to the
 * normal stack, and adjusts ESP with the matching offset.
 */
	/* fixup the stack */
	PER_CPU(gdt_page, %ebx)
	mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
	mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl $__KERNEL_DS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.endm

.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
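/*
 * A rough accounting of the 32-byte budget (not part of the original
 * file, assuming the short encodings apply): each stub is a 2-byte
 * "pushl $imm8" plus a 2-byte short "jmp 2f", and the 7th stub falls
 * straight through to the shared 5-byte "jmp common_interrupt", so a
 * chunk needs about 6*4 + 2 + 5 = 31 bytes, which the .balign 32 below
 * rounds up to one 32-byte chunk.
 */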
.section .init.rodata,"a"
ENTRY(interrupt)
.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl $(~vector+0x80)		/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 4
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

/*
 * Irq entries should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl $do_general_protection
662:
.section .altinstructions,"a"
	.balign 4
	.long 661b
	.long 663f
	.byte X86_FEATURE_XMM
	.byte 662b-661b
	.byte 664f-663f
.previous
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
664:
.previous
#else
	pushl $do_simd_coprocessor_error
#endif
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_device_not_available
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)
/*
 * End of kprobes section
 */
	.popsection

ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edi,%eax
	call *%esi
	call do_exit
	ud2				# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entry point expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp		/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF

	/* Check to see if we got the event in the critical
	   region in xen_iret_direct, after we've reenabled
	   events and checked for pending events. This simulates
	   the iret instruction's behaviour, where it delivers a
	   pending interrupt when enabling interrupts. */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb 1f
	cmpl $xen_iret_end_crit,%eax
	jae 1f

	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
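# A walkthrough of that status value (not part of the original file):
# %eax is set to 1 before the segment reloads below. If one of them
# faults, the matching fixup handler (labels 6-9) zeroes both %eax and
# the saved selector on the stack, then retries the load, which now
# succeeds with a null selector. So after label 4, %eax == 0 means a
# segment load faulted (category 1, taken at 5f via ret_from_exception),
# while %eax != 0 means no segment faulted and the callback must have
# been triggered by a failing IRET (category 2, routed to iret_exc).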
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc		# EAX != 0 => Category 2 (Bad IRET)
5:	pushl $0		# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)

#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif

.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)
/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $__KERNEL_CS
	CFI_ADJUST_CFA_OFFSET 4
	pushl $sysenter_past_esp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
.endm
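/*
 * A note on the \offset parameter (not part of the original file):
 * %esp still holds the SYSENTER_ESP MSR value, minus whatever frames
 * have been pushed on top of it since. A direct debug/NMI hit has
 * pushed one 3-word eflags/cs/eip frame, hence offset 12; the
 * NMI-during-debug-fixup case below has two such frames on the stack,
 * hence offset 24.
 */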
ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
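/*
 * The resulting checks, in order (not part of the original file):
 * (1) if %ss is __ESPFIX_SS, take nmi_espfix_stack; (2) if the saved
 * eip is exactly ia32_sysenter_target, the NMI hit the sysenter path
 * itself (FIX_STACK 12 via nmi_stack_fixup); (3) if, below the NMI's
 * own frame, a debug-trap frame shows that *it* hit
 * ia32_sysenter_target and the NMI landed inside debug's fixup window,
 * both frames must be moved (FIX_STACK 24 via nmi_debug_stack_check);
 * otherwise the stack is already fine (nmi_stack_correct).
 */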
ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/* We have a RING0_INT_FRAME here.
	 *
	 * Create the pointer for lss to switch back with.
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(general_protection)
/*
 * End of kprobes section
 */
	.popsection