entry_32.S

/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'syscall_exit':
 * ptrace needs to have all registers on the stack.
 * If the order here is changed, it needs to be
 * updated in fork.c:copy_process, signal.c:do_signal,
 * ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */
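
/*
 * For reference, a sketch of how the offsets above line up with
 * struct pt_regs in arch/x86/include/asm/ptrace.h (assuming the
 * 32-bit layout of this era, all fields unsigned long):
 *
 *	bx cx dx si di bp ax ds es fs gs orig_ax ip cs flags sp ss
 *
 * so e.g. PT_EAX = 6*4 = 0x18 and PT_EIP = 12*4 = 0x30 above.
 */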
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit	syscall_trace_entry
#define sysexit_audit	syscall_exit_work
#endif

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization. The following will never clobber any registers:
 *	INTERRUPT_RETURN		(aka. "iret")
 *	GET_CR0_INTO_EAX		(aka. "movl %cr0, %eax")
 *	ENABLE_INTERRUPTS_SYSEXIT	(aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */
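
/*
 * Illustration: a call site that can spare a register writes, e.g.,
 *	DISABLE_INTERRUPTS(CLBR_EAX)
 * telling the patcher that %eax may be clobbered, so a short replacement
 * sequence using %eax can be patched inline instead of emitting a call.
 */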
#define nr_syscalls ((syscall_table_size)/4)
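/*
 * syscall_table_size is computed at the bottom of this file as
 * (. - sys_call_table); each entry is a 4-byte pointer, hence the
 * division by 4. The dispatchers below index the table accordingly:
 *	call *sys_call_table(,%eax,4)
 */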
#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

#ifdef CONFIG_VM86
#define resume_userspace_sig	check_userspace
#else
#define resume_userspace_sig	resume_userspace
#endif

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS; the kernel uses it only for the stack
 * canary, which gcc requires to be at %gs:20. Read the comment at the
 * top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
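
/*
 * Illustration (gcc-generated, not part of this file): stack-protector
 * code accesses the canary as
 *	movl %gs:20, %reg
 * in prologues/epilogues, which is why a valid canary must live at
 * %gs:20 whenever such code can run.
 */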
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl %gs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET gs, 0*/
.endm

.macro POP_GS pop=0
98:	popl %gs
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE gs*/
  .if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
  .endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.section __ex_table, "a"
	.align 4
	.long 98b, 99b
.popsection
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
	/*CFI_REGISTER gs, \reg*/
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
	/*CFI_REL_OFFSET gs, PT_GS*/
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */
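
/*
 * To summarize the two variants above: with CONFIG_X86_32_LAZY_GS,
 * PUSH_GS merely reserves the pt_regs slot (pushl $0) and %gs is left
 * alone; otherwise the 98:/99: label pairs route a faulting segment
 * load through .fixup via __ex_table, retrying it with a zeroed value.
 */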
.macro SAVE_ALL
	cld
	PUSH_GS
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0;*/
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0;*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0;*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl %ebx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebx
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ecx
	popl %edx
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edx
	popl %esi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE esi
	popl %edi
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE edi
	popl %ebp
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE ebp
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl %ds
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE ds;*/
2:	popl %es
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE es;*/
3:	popl %fs
	CFI_ADJUST_CFA_OFFSET -4
	/*CFI_RESTORE fs;*/
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.section __ex_table, "a"
	.align 4
	.long 1b, 4b
	.long 2b, 5b
	.long 3b, 6b
.popsection
	POP_GS_EX
.endm
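
/*
 * The pattern above is the usual exception-table fixup: each
 * ".long fault, fixup" pair tells the fault handler that if, say,
 * "1: popl %ds" faults on a bad saved selector, it should resume at
 * "4:", which overwrites the stacked value with 0 (the null selector)
 * and retries the pop, which then cannot fault.
 */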
.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	/*CFI_OFFSET cs, -2*4;*/
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	/*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	/*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
	/*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	pushl $0x0202			# Reset kernel eflags
	CFI_ADJUST_CFA_OFFSET 4
	popfl
	CFI_ADJUST_CFA_OFFSET -4
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
check_userspace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace
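/*
 * Worked example of the check above: X86_EFLAGS_VM is 0x20000 and
 * SEGMENT_RPL_MASK is 3, so after mixing CS into the low byte:
 *	return to kernel: VM clear, CS RPL 0 -> %eax = 0 (< USER_RPL)
 *	return to user:   VM clear, CS RPL 3 -> %eax = 3
 *	return to vm86:   VM set             -> %eax >= 0x20000
 * Only the first case falls below USER_RPL and resumes the kernel.
 */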
ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC
/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol.  */

	# sysenter call handler stub
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state has been set up to call TRACE_IRQS_OFF;
	 * we immediately enable interrupts at that point anyway.
	 */
	pushl $(__USER_DS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ss, 0*/
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esp, 0
	pushfl
	orl $X86_EFLAGS_IF, (%esp)
	CFI_ADJUST_CFA_OFFSET 4
	pushl $(__USER_CS)
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET cs, 0*/
	/*
	 * Push current_thread_info()->sysenter_return to the stack.
	 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
	 * pushed above; +8 corresponds to copy_thread's esp0 setting.
	 */
	pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0

	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

/*
 * Load the potential sixth argument from the user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
	movl %ebp,PT_EBP(%esp)
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
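/*
 * The "-3" above makes the last byte of the 4-byte load user-checked
 * too: with the usual __PAGE_OFFSET of 0xC0000000, the highest
 * accepted %ebp is 0xBFFFFFFC, so (%ebp)..3(%ebp) all lie below the
 * kernel boundary. A fault on the load itself is caught by the
 * __ex_table entry above and routed to syscall_fault.
 */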
	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
sysenter_exit:
	/* if something modifies registers it must also disable sysexit */
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	/* %esi already in 8(%esp)	6th arg: 4th syscall arg */
	/* %edx already in 4(%esp)	5th arg: 3rd syscall arg */
	/* %ecx already in 0(%esp)	4th arg: 2nd syscall arg */
	movl %ebx,%ecx			/* 3rd arg: 1st syscall arg */
	movl %eax,%edx			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_I386,%eax	/* 1st arg: audit arch */
	call audit_syscall_entry
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx		/* second arg, syscall return value */
	cmpl $0,%eax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%eax		/* zero-extend that */
	inc %eax		/* first arg: 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax	/* reload syscall return value */
	jmp sysenter_exit
#endif
	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.section __ex_table,"a"
	.align 4
	.long 1b,2b
.popsection
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

	# system call handler stub
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	pushl %eax			# save orig_eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
	# Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	# are returning to the kernel.
	# See comments in process.c:copy_thread() for details.
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
	CFI_ADJUST_CFA_OFFSET -4
irq_return:
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0			# no error code
	pushl $do_iret_error
	jmp error_code
.previous
.section __ex_table,"a"
	.align 4
	.long irq_return,iret_exc
.previous

	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# all right, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active. Rather than try to fix up the high bits of
	 * ESP, bypass this code entirely. This may break DOSemu
	 * and/or Wine support in a paravirt VM, although the option
	 * is still available to implement the setting of the high
	 * 16 bits in the INTERRUPT_RETURN paravirt-op.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

/*
 * Set up and switch to the ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that makes up for the difference.
 */
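/*
 * Worked example (made-up addresses): say the kernel %esp is
 * 0xC1234A10 and the userspace ESP is 0x00DE8E44. The code below
 * computes
 *	%eax = 0x00DE4A10	(user high word, kernel low word)
 *	%edx = 0xC0450000	(kernel esp - %eax; low word is 0)
 * and writes %edx >> 16 = 0xC045 into base bits 16..31 of the
 * __ESPFIX_SS descriptor, so that base + %eax = 0xC1234A10 again:
 * the linear stack address is unchanged, but ESP now carries the
 * user's high word when iret leaks it.
 */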
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	PER_CPU(gdt_page, %ebx)
	shr $16, %edx
	mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
	mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
	pushl $__ESPFIX_SS
	CFI_ADJUST_CFA_OFFSET 4
	push %eax			/* new kernel esp */
	CFI_ADJUST_CFA_OFFSET 4
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the iret.
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
	CFI_ENDPROC
ENDPROC(system_call)

	# perform work that needs to be done immediately before resumption
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	CFI_ADJUST_CFA_OFFSET 4
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	CFI_ADJUST_CFA_OFFSET -4
	movl %eax, %esp
#else
	movl %esp, %eax
#endif
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace_sig
END(work_pending)

	# perform syscall entry tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC

/*
 * System calls that need a pt_regs pointer.
 */
#define PTREGSCALL(name) \
	ALIGN; \
ptregs_##name: \
	leal 4(%esp),%eax; \
	jmp sys_##name;

PTREGSCALL(iopl)
PTREGSCALL(fork)
PTREGSCALL(clone)
PTREGSCALL(vfork)
PTREGSCALL(execve)
PTREGSCALL(sigaltstack)
PTREGSCALL(sigreturn)
PTREGSCALL(rt_sigreturn)
PTREGSCALL(vm86)
PTREGSCALL(vm86old)
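
/*
 * For instance, PTREGSCALL(fork) expands (roughly) to:
 *	ptregs_fork:
 *		leal 4(%esp),%eax
 *		jmp sys_fork
 * %esp points at the return address pushed by the indirect call in
 * system_call, so 4(%esp) is the saved pt_regs; the pointer reaches
 * the C handler in %eax, the first -mregparm=3 argument.
 */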
.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack.
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack, adjusting ESP with the matching offset.
 */
	/* fixup the stack */
	PER_CPU(gdt_page, %ebx)
	mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
	mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl $__KERNEL_DS
	CFI_ADJUST_CFA_OFFSET 4
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	lss (%esp), %esp		/* switch to the normal stack segment */
	CFI_ADJUST_CFA_OFFSET -8
.endm
.macro UNWIND_ESPFIX_STACK
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
.endm

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
.section .init.rodata,"a"
ENTRY(interrupt)
.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept 7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl $(~vector+0x80)		/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 4
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous
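
/*
 * Worked example of the stub encoding: ~vector+0x80 equals
 * 0x7f - vector, which stays within [-0x80, 0x5f] for vectors
 * 0x20..0xff, so every pushl above assembles to a 2-byte push of a
 * sign-extended byte. common_interrupt below then adds -0x80,
 * leaving ~vector, i.e. a value in [-256,-1] from which do_IRQ
 * recovers the vector number.
 */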
/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	addl $-0x80,(%esp)	/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	pushl $~(nr);			\
	CFI_ADJUST_CFA_OFFSET 4;	\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)	BUILD_INTERRUPT3(name, nr, smp_##name)
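
/*
 * Illustration: <asm/entry_arch.h> instantiates these; for example a
 * line like
 *	BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 * emits a reschedule_interrupt stub that pushes ~RESCHEDULE_VECTOR,
 * saves all registers and calls smp_reschedule_interrupt with the
 * pt_regs pointer in %eax.
 */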
/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_simd_coprocessor_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_device_not_available
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
.section __ex_table,"a"
	.align 4
	.long native_iret, iret_exc
.previous
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_overflow
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_bounds
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_invalid_op
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_coprocessor_segment_overrun
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	pushl $do_invalid_TSS
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	pushl $do_segment_not_present
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	pushl $do_stack_segment
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	pushl $do_alignment_check
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	pushl $0			# no error code
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_divide_error
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl machine_check_vector
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	pushl $do_spurious_interrupt_bug
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)

ENTRY(kernel_thread_helper)
	pushl $0			# fake return address for unwinder
	CFI_STARTPROC
	movl %edx,%eax
	push %edx
	CFI_ADJUST_CFA_OFFSET 4
	call *%ebx
	push %eax
	CFI_ADJUST_CFA_OFFSET 4
	call do_exit
	ud2				# padding for call trace
	CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
/* Xen doesn't set %esp to be precisely what the normal sysenter
   entry point expects, so fix it up before using the normal path. */
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp			/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl $0
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events. This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts.
	 */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb 1f
	cmpl $xen_iret_end_crit,%eax
	jae 1f

	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
#  1. Fault while reloading DS, ES, FS or GS
#  2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	testl %eax,%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	addl $16,%esp
	jmp iret_exc			# EAX != 0 => Category 2 (Bad IRET)
5:	pushl $0			# EAX == 0 => Category 1 (Bad segment)
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,6b
	.long 2b,7b
	.long 3b,8b
	.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)
#endif	/* CONFIG_XEN */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	popl %edx
	popl %ecx
	popl %eax
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

	/* taken from glibc */
trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
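
/*
 * Calling convention sketch for the mcount entries above: with -pg,
 * gcc inserts "call mcount" right after each function prologue, so
 * %ebp already frames the instrumented function. After the three
 * pushes, 0xc(%esp) is the address following that call (rewound by
 * MCOUNT_INSN_SIZE to the call site) and 0x4(%ebp) is the
 * instrumented function's own return address, i.e. its caller's ip.
 */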
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl $0
	pushl %eax
	pushl %ecx
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, 0xc(%esp)
	popl %edx
	popl %ecx
	popl %eax
	ret
#endif
.section .rodata,"a"
#include "syscall_table_32.S"

syscall_table_size=(.-sys_call_table)

/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

ENTRY(page_fault)
	RING0_EC_FRAME
	pushl $do_page_fault
	CFI_ADJUST_CFA_OFFSET 4
	ALIGN
error_code:
	/* the function address is in %gs's slot on the stack */
	pushl %fs
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET fs, 0*/
	pushl %es
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET es, 0*/
	pushl %ds
	CFI_ADJUST_CFA_OFFSET 4
	/*CFI_REL_OFFSET ds, 0*/
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eax, 0
	pushl %ebp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebp, 0
	pushl %edi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edi, 0
	pushl %esi
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET esi, 0
	pushl %edx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET edx, 0
	pushl %ecx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ecx, 0
	pushl %ebx
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl
	CFI_ADJUST_CFA_OFFSET 4
	pushl $__KERNEL_CS
	CFI_ADJUST_CFA_OFFSET 4
	pushl $sysenter_past_esp
	CFI_ADJUST_CFA_OFFSET 4
	CFI_REL_OFFSET eip, 0
.endm

ENTRY(debug)
	RING0_INT_FRAME
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	RING0_INT_FRAME
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	je nmi_espfix_stack
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	movl %esp,%eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	CFI_ADJUST_CFA_OFFSET -4
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_espfix_stack:
	/*
	 * We have a RING0_INT_FRAME here.
	 *
	 * Create the pointer for lss to switch back with.
	 */
	pushl %ss
	CFI_ADJUST_CFA_OFFSET 4
	pushl %esp
	CFI_ADJUST_CFA_OFFSET 4
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	CFI_ADJUST_CFA_OFFSET 4
	.endr
	pushl %eax
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	pushl $-1			# mark this as an int
	CFI_ADJUST_CFA_OFFSET 4
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl $do_general_protection
	CFI_ADJUST_CFA_OFFSET 4
	jmp error_code
	CFI_ENDPROC
END(general_protection)

/*
 * End of kprobes section
 */
	.popsection