
/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

	.code64

#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl mcount_call
mcount_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	retq
END(mcount)
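/*
 * ftrace_caller is the entry point used once dynamic ftrace has patched
 * the call sites: %rdi is set to the call site (the return address minus
 * MCOUNT_INSN_SIZE) and %rsi to the parent's return address taken from
 * 8(%rbp), i.e. the (ip, parent_ip) pair the tracer callback receives.
 */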
ENTRY(ftrace_caller)
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */
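/*
 * TRACE_IRQS_IRETQ reports interrupts as about to be re-enabled for
 * lockdep only when the EFLAGS image that iretq will restore has IF
 * (bit 9) set; if IF is clear in the saved frame nothing is traced.
 */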
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc 1f
	TRACE_IRQS_ON
1:
#endif
.endm
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP(%rsp)
	movq $__USER_DS,SS(%rsp)
	movq $__USER_CS,CS(%rsp)
	movq $-1,RCX(%rsp)
	movq R11(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq RSP-\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS-\offset(%rsp),\tmp
	movq \tmp,R11-\offset(%rsp)
.endm
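/*
 * FAKE_STACK_FRAME builds a hardware-style interrupt frame (ss, rsp,
 * eflags, cs, rip) plus an orig_rax slot on the kernel stack, so that
 * callers such as kernel_thread() and kernel_execve() have a complete
 * pt_regs to hand to C and can leave through the common return paths.
 */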
.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET ss,0*/
	pushq %rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rsp,0
	pushq $(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip,0
	pushq %rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET 8
.endm

.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET -(6*8)
.endm

.macro CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET r15,R15
	CFI_REL_OFFSET r14,R14
	CFI_REL_OFFSET r13,R13
	CFI_REL_OFFSET r12,R12
	CFI_REL_OFFSET rbp,RBP
	CFI_REL_OFFSET rbx,RBX
	CFI_REL_OFFSET r11,R11
	CFI_REL_OFFSET r10,R10
	CFI_REL_OFFSET r9,R9
	CFI_REL_OFFSET r8,R8
	CFI_REL_OFFSET rax,RAX
	CFI_REL_OFFSET rcx,RCX
	CFI_REL_OFFSET rdx,RDX
	CFI_REL_OFFSET rsi,RSI
	CFI_REL_OFFSET rdi,RDI
	CFI_REL_OFFSET rip,RIP
	/*CFI_REL_OFFSET cs,CS*/
	/*CFI_REL_OFFSET rflags,EFLAGS*/
	CFI_REL_OFFSET rsp,RSP
	/*CFI_REL_OFFSET ss,SS*/
.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax	system call number
 * rdi	arg0
 * rcx	return address for syscall/sysret, C arg3
 * rsi	arg1
 * rdx	arg2
 * r10	arg3	(--> moved to rcx for C)
 * r8	arg4
 * r9	arg5
 * r11	eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 *	stack frame and report it properly in ps. Unfortunately we don't
 *	have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
ENTRY(system_call)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,PDA_STACKOFFSET
	CFI_REGISTER rip,rcx
	/*CFI_REGISTER rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
		TI_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	movq %gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx: work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f

	/* Really a signal */
	/* edx: work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx	/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx: work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls that need to save a complete stack frame.
 */
.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq \func(%rip),%rax
	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp ptregscall_common
END(\label)
.endm

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi
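/*
 * ptregscall_common pops the stub's return address into %r11 (kept in
 * %r15 across the call), completes the register frame with SAVE_REST and
 * FIXUP_TOP_OF_STACK so the handler sees a full pt_regs, and then calls
 * the handler the PTREGSCALL stub left in %rax.
 */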
ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)
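/*
 * execve gets its own stub: sys_execve takes a pt_regs pointer so it can
 * set up the new program's register state, and the stub returns through
 * the IRET path because the saved registers change under it.
 */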
ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count,
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
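	/* pda_irqcount is initialized to -1, so the incl below yields zero
	   (and sets ZF) only on the first, outermost entry; cmoveq then
	   switches %rsp to the per-CPU interrupt stack, while nested
	   interrupts stay on the stack they are already on. */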
1:	incl %gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
.endm

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0
irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0
	SWAPGS
	jmp general_protection
	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt $TIF_NEED_RESCHED,%edx
	jnc retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
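/*
 * The vector number is pushed as its one's complement (~num), so the
 * saved orig_rax is always negative and cannot be mistaken for a valid
 * system call number; the interrupt handling code complements it again
 * to recover the vector.
 */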
.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)

ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)

ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
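/*
 * zeroentry is for exceptions where the CPU pushes no error code; it
 * pushes a 0 in its place. errorentry is for exceptions where the CPU
 * has already pushed an error code. Both load the address of the C
 * handler into %rax and branch to the common error_entry path.
 */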
.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0	/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax	/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
.endm

.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
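	/* paranoidentry uses %ebx as the "no swapgs needed" flag: it reads
	   MSR_GS_BASE and tests the sign bit - a negative (kernel) base
	   means the kernel GS is already loaded, otherwise SWAPGS is done
	   here and %ebx is cleared so the exit path swaps back. */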
.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f
	SWAPGS
	xorl %ebx,%ebx
1:
	.if \ist
	movq %gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
.endm

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
.macro paranoidexit trace=1
	/* ebx: no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
.endm
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET (14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je error_swapgs
	movl %ecx,%ecx		/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)
	/* Reload gs selector with exception handling */
	/* edi: new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here, so within the
	 * x86_64 port you can rely on kernel_thread() not rescheduling
	 * the child before returning; this avoids the need for hacks,
	 * for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

	/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp		# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)
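/*
 * ignore_sysret is used as the SYSCALL entry point when 32-bit syscall
 * emulation is not configured, so a 32-bit SYSCALL simply returns
 * -ENOSYS.
 */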
KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)