/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers
 *   are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - define exception entry points.
 */
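/*
 * An editorial sketch of the frames described above (the slots are the
 * pt_regs fields named by asm-offsets; higher addresses first):
 *
 *	SS, RSP, EFLAGS, CS, RIP	<- "top of stack": hardware iret frame
 *	ORIG_RAX			<- syscall number / error code slot
 *	RDI ... R11			<- partial stack frame (SAVE_ARGS)
 *	RBX, RBP, R12 ... R15		<- added by SAVE_REST: full stack frame
 */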
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

	.code64
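/*
 * Editorial background sketch (assuming gcc -pg style instrumentation):
 * with CONFIG_FUNCTION_TRACER the compiler emits a "call mcount" into
 * every traced function.  With CONFIG_DYNAMIC_FTRACE those call sites
 * are patched at runtime, which is why the mcount below is a bare retq
 * and the real work lives in ftrace_caller.
 */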
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	MCOUNT_SAVE_FRAME

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

GLOBAL(ftrace_call)
	call ftrace_stub

	MCOUNT_RESTORE_FRAME

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
	jmp ftrace_stub
#endif

GLOBAL(ftrace_stub)
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpq $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif

GLOBAL(ftrace_stub)
	retq

trace:
	MCOUNT_SAVE_FRAME

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

	MCOUNT_RESTORE_FRAME

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	cmpl $0, function_trace_stop
	jne ftrace_stub

	MCOUNT_SAVE_FRAME

	leaq 8(%rbp), %rdi
	movq 0x38(%rsp), %rsi
	movq (%rbp), %rdx
	subq $MCOUNT_INSN_SIZE, %rsi

	call prepare_ftrace_return

	MCOUNT_RESTORE_FRAME

	retq
END(ftrace_graph_caller)
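/*
 * An editorial note on return_to_handler: it is installed as the fake
 * return address of a traced function.  The dance below parks the live
 * return values (rax/rdx) on the stack, lets ftrace_return_to_handler()
 * hand back the original return address (stored at 16(%rsp)), reloads
 * the values, and pops 16 bytes so that address ends up on top: the
 * final retq then resumes the real caller.
 */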
GLOBAL(return_to_handler)
	subq $24, %rsp

	/* Save the return values */
	movq %rax, (%rsp)
	movq %rdx, 8(%rsp)
	movq %rbp, %rdi

	call ftrace_return_to_handler

	movq %rax, 16(%rsp)
	movq 8(%rsp), %rdx
	movq (%rsp), %rax
	addq $16, %rsp
	retq
#endif
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc 1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
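/*
 * An editorial note on the -1 and R11 games below: SYSCALL stashes the
 * return RIP in %rcx and EFLAGS in %r11 rather than on the stack, so in
 * a SYSCALL partial frame the RCX slot carries no user value (it is
 * marked with -1) and the saved R11 slot doubles as the EFLAGS image.
 */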
	/* %rsp:at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp offset=0
	movq PER_CPU_VAR(old_rsp),\tmp
	movq \tmp,RSP+\offset(%rsp)
	movq $__USER_DS,SS+\offset(%rsp)
	movq $__USER_CS,CS+\offset(%rsp)
	movq $-1,RCX+\offset(%rsp)
	movq R11+\offset(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS+\offset(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp offset=0
	movq RSP+\offset(%rsp),\tmp
	movq \tmp,PER_CPU_VAR(old_rsp)
	movq EFLAGS+\offset(%rsp),\tmp
	movq \tmp,R11+\offset(%rsp)
	.endm

	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq %rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq $X86_EFLAGS_IF	/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq %rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
	.endm
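/*
 * Resulting fake frame, an editorial sketch of the pushes above
 * (top of stack listed first):
 *
 *	ss       = __KERNEL_DS
 *	rsp      = 0
 *	eflags   = X86_EFLAGS_IF
 *	cs       = __KERNEL_CS
 *	rip      = the child_rip macro argument
 *	orig_rax = 0
 */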
	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
	.endm

/*
 * empty frame state, the common basis for the frame macros below
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C
 */
	.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function
 */
	.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, R11+\offset-R15
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm
/* save partial stack frame */
ENTRY(save_args)
	XCPT_FRAME
	cld
	movq_cfi rdi, RDI+16-ARGOFFSET
	movq_cfi rsi, RSI+16-ARGOFFSET
	movq_cfi rdx, RDX+16-ARGOFFSET
	movq_cfi rcx, RCX+16-ARGOFFSET
	movq_cfi rax, RAX+16-ARGOFFSET
	movq_cfi r8,  R8+16-ARGOFFSET
	movq_cfi r9,  R9+16-ARGOFFSET
	movq_cfi r10, R10+16-ARGOFFSET
	movq_cfi r11, R11+16-ARGOFFSET
	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
	movq_cfi rbp, 8			/* push %rbp */
	leaq 8(%rsp), %rbp		/* mov %rsp, %rbp */
	testl $3, CS(%rdi)
	je 1f
	SWAPGS
	/*
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
1:	incl PER_CPU_VAR(irq_count)
	jne 2f
	popq_cfi %rax			/* move return address... */
	mov PER_CPU_VAR(irq_stack_ptr),%rsp
	EMPTY_FRAME 0
	pushq_cfi %rbp			/* backlink for unwinder */
	pushq_cfi %rax			/* ... to the new stack */
	/*
	 * We entered an interrupt context - irqs are off:
	 */
2:	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC
END(save_args)
ENTRY(save_rest)
	PARTIAL_FRAME 1 REST_SKIP+8
	movq 5*8+16(%rsp), %r11	/* save return address */
	movq_cfi rbx, RBX+16
	movq_cfi rbp, RBP+16
	movq_cfi r12, R12+16
	movq_cfi r13, R13+16
	movq_cfi r14, R14+16
	movq_cfi r15, R15+16
	movq %r11, 8(%rsp)	/* return address */
	FIXUP_TOP_OF_STACK %r11, 16
	ret
	CFI_ENDPROC
END(save_rest)

/* save complete stack frame */
	.pushsection .kprobes.text, "ax"
ENTRY(save_paranoid)
	XCPT_FRAME 1 RDI+8
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f	/* negative -> in kernel */
	SWAPGS
	xorl %ebx,%ebx
1:	ret
	CFI_ENDPROC
END(save_paranoid)
	.popsection

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
	DEFAULT_FRAME

	LOCK ; btr $TIF_FORK,TI_flags(%r8)

	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf					# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8

	call schedule_tail			# rdi: 'prev' task parameter

	GET_THREAD_INFO(%rcx)

	RESTORE_REST

	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
	je int_ret_from_sys_call

	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
	jnz int_ret_from_sys_call

	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
	jmp ret_from_sys_call			# go to the SYSRET fastpath

	CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with noncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
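/*
 * For illustration only (not part of this file): a minimal user-space
 * write(2) using this register convention, assuming __NR_write == 1 and
 * msg/len defined elsewhere:
 *
 *	movl $1, %eax		# syscall number in rax
 *	movl $1, %edi		# arg0: fd = stdout
 *	leaq msg(%rip), %rsi	# arg1: buffer
 *	movl $len, %edx		# arg2: count
 *	syscall			# cpu: rcx <- return rip, r11 <- eflags
 */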
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,KERNEL_STACK_OFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq %rsp,PER_CPU_VAR(old_rsp)
	movq PER_CPU_VAR(kernel_stack),%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	movq PER_CPU_VAR(old_rsp), %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx:	work flags (arg3) */
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call do_notify_resume
	RESTORE_TOP_OF_STACK %r11
	RESTORE_REST
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
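	/*
	 * An editorial note: the register shuffle below is laid out for
	 * the audit_syscall_entry() prototype of this kernel generation,
	 * audit_syscall_entry(arch, major, a1, a2, a3, a4): rdi/rsi carry
	 * the audit arch and syscall number, rdx/rcx/r8/r9 the first four
	 * syscall arguments.
	 */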
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0			/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */

	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
GLOBAL(int_ret_from_sys_call)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
GLOBAL(int_with_check)
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
ENTRY(\label)
	PARTIAL_FRAME 1 8		/* offset 8: return address */
	subq $REST_SKIP, %rsp
	CFI_ADJUST_CFA_OFFSET REST_SKIP
	call save_rest
	DEFAULT_FRAME 0 8		/* offset 8: return address */
	leaq 8(%rsp), \arg		/* pt_regs pointer */
	call \func
	jmp ptregscall_common
	CFI_ENDPROC
END(\label)
	.endm

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	DEFAULT_FRAME 1 8	/* offset 8: return address */
	RESTORE_TOP_OF_STACK %r11, 8
	movq_cfi_restore R15+8, r15
	movq_cfi_restore R14+8, r14
	movq_cfi_restore R13+8, r13
	movq_cfi_restore R12+8, r12
	movq_cfi_restore RBP+8, rbp
	movq_cfi_restore RBX+8, rbx
	ret $REST_SKIP		/* pop extended registers */
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
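/*
 * An editorial size sketch for the 32-byte claim: six stubs of
 * "pushq imm8" (2 bytes) plus "jmp 2f" (2 bytes), a seventh stub that
 * falls through with just the push, and the shared near
 * "jmp common_interrupt" come to 6*4 + 2 + 5 = 31 bytes per
 * .balign 32 chunk.
 */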
	.section .init.rodata,"a"
ENTRY(interrupt)
	.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq $(~vector+0x80)	/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 8
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.quad 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only call-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	subq $10*8, %rsp
	CFI_ADJUST_CFA_OFFSET 10*8
	call save_args
	PARTIAL_FRAME 0
	call \func
	.endm

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_interrupt.
 */
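/*
 * A worked example of the encoding (editorial): ~vector+0x80 equals
 * 0x7f-vector, which lies in [-0x80, 0x5f] for vectors 0x20..0xff and
 * so fits a sign-extended byte push.  After the addq $-0x80 below, the
 * stack slot holds ~vector again, and the handler can recover the
 * vector with a bitwise NOT of orig_ax.
 */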
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl PER_CPU_VAR(irq_count)
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt $TIF_NEED_RESCHED,%edx
	jnc retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \do_sym
	jmp ret_from_intr
	CFI_ENDPROC
END(\sym)
.endm

#ifdef CONFIG_SMP
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
apicinterrupt REBOOT_VECTOR \
	reboot_interrupt smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt UV_BAU_MESSAGE \
	uv_bau_message_intr1 uv_bau_message_interrupt
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
	apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt GENERIC_INTERRUPT_VECTOR \
	generic_interrupt smp_generic_interrupt

#ifdef CONFIG_SMP
apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
	invalidate_interrupt0 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
	invalidate_interrupt1 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
	invalidate_interrupt2 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
	invalidate_interrupt3 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
	invalidate_interrupt4 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
	invalidate_interrupt5 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
	invalidate_interrupt6 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
	invalidate_interrupt7 smp_invalidate_interrupt
#endif

apicinterrupt THRESHOLD_APIC_VECTOR \
	threshold_interrupt smp_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
	thermal_interrupt smp_thermal_interrupt

#ifdef CONFIG_X86_MCE
apicinterrupt MCE_SELF_VECTOR \
	mce_self_interrupt smp_mce_self_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
	call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
	call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
	reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
	error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_PERF_EVENTS
apicinterrupt LOCAL_PENDING_VECTOR \
	perf_pending_interrupt smp_perf_pending_interrupt
#endif
/*
 * Exception entry points.
 */
.macro zeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \do_sym
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	PER_CPU(init_tss, %rbp)
	subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
	call \do_sym
	addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

.macro errorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \do_sym
	jmp error_exit			/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm

	/* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid
	DEFAULT_FRAME 0
	TRACE_IRQS_OFF
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \do_sym
	jmp paranoid_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(\sym)
.endm
zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error

	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
END(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
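/*
 * Hypothetical usage sketch (illustrative, not from this file):
 * kernel_thread(worker_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD)
 * would run worker_fn(NULL) in a fresh kernel thread; the fake frame
 * built below makes the child "return" to child_rip, which performs
 * the call.
 */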
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not rescheduling the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
END(kernel_thread)

ENTRY(child_rip)
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	ud2			# padding for call trace
	CFI_ENDPROC
END(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back to:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
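/*
 * An editorial note on the tail below: on success sys_execve() returns
 * 0 but the pt_regs now describe the freshly loaded image, so we must
 * leave via int_ret_from_sys_call (IRET); only on failure can we unwind
 * the fake frame and return to the caller normally.
 */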
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
END(kernel_execve)
/* Call softirq on interrupt stack. Interrupts are off. */
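/*
 * An editorial note on the stack switch below: per-cpu irq_count is
 * initialized to -1, so the incl sets ZF only on the outermost entry;
 * cmove then loads the irq stack pointer only in that case, and nested
 * invocations stay on the current stack.
 */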
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET rbp,0
	mov %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl PER_CPU_VAR(irq_count)
	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
	push %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl PER_CPU_VAR(irq_count)
	ret
	CFI_ENDPROC
END(call_softirq)
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
	CFI_STARTPROC
/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
	movq %rdi, %rsp		# we don't return, adjust the stack frame
	CFI_ENDPROC
	DEFAULT_FRAME
11:	incl PER_CPU_VAR(irq_count)
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
	pushq %rbp			# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl PER_CPU_VAR(irq_count)
	jmp error_exit
	CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	INTR_FRAME 1 (6*8)
	/*CFI_REL_OFFSET gs,GS*/
	/*CFI_REL_OFFSET fs,FS*/
	/*CFI_REL_OFFSET es,ES*/
	/*CFI_REL_OFFSET ds,DS*/
	CFI_REL_OFFSET r11,8
	CFI_REL_OFFSET rcx,0
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0	/* RIP */
	pushq_cfi %r11
	pushq_cfi %rcx
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */
/*
 * Some functions should be protected against kprobes
 */
	.pushsection .kprobes.text, "ax"

paranoidzeroentry_ist debug do_debug DEBUG_STACK
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
paranoiderrorentry stack_segment do_stack_segment
#ifdef CONFIG_XEN
zeroentry xen_debug do_debug
zeroentry xen_int3 do_int3
errorentry xen_stack_segment do_stack_segment
#endif
errorentry general_protection do_general_protection
errorentry page_fault do_page_fault
#ifdef CONFIG_X86_MCE
paranoidzeroentry machine_check *machine_check_vector(%rip)
#endif

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
	/* ebx:	no swapgs flag */
ENTRY(paranoid_exit)
	INTR_FRAME
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore
	testl $3,CS(%rsp)
	jnz paranoid_userspace
paranoid_swapgs:
	TRACE_IRQS_IRETQ 0
	SWAPGS_UNSAFE_STACK
	RESTORE_ALL 8
	jmp irq_return
paranoid_restore:
	TRACE_IRQS_IRETQ 0
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
paranoid_schedule:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
	CFI_ENDPROC
END(paranoid_exit)
/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * Returns the "no swapgs flag" in %ebx.
 */
ENTRY(error_entry)
	XCPT_FRAME
	CFI_ADJUST_CFA_OFFSET 15*8
	/* oldrax contains error code */
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	xorl %ebx,%ebx
	testl $3,CS+8(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
	incl %ebx
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	movl %ecx,%ecx	/* zero extend */
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP+8(%rsp)
	je error_swapgs
	jmp error_sti
END(error_entry)
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
	DEFAULT_FRAME
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC
END(error_exit)


	/* runs on exception stack */
ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1
	subq $15*8, %rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid
	DEFAULT_FRAME 0
	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq %rsp,%rdi
	movq $-1,%rsi
	call do_nmi
#ifdef CONFIG_TRACE_IRQFLAGS
	/* paranoidexit; without TRACE_IRQS_OFF */
	/* ebx:	no swapgs flag */
	DISABLE_INTERRUPTS(CLBR_NONE)
	testl %ebx,%ebx			/* swapgs needed? */
	jnz nmi_restore
	testl $3,CS(%rsp)
	jnz nmi_userspace
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	RESTORE_ALL 8
	jmp irq_return
nmi_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz nmi_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz nmi_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp nmi_userspace
nmi_schedule:
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	jmp nmi_userspace
	CFI_ENDPROC
#else
	jmp paranoid_exit
	CFI_ENDPROC
#endif
END(nmi)

ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
END(ignore_sysret)

/*
 * End of kprobes section
 */
	.popsection