entry_64.S

/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like a partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
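/*
 * Editorial note: with EM_X86_64 == 62 (0x3e), AUDIT_ARCH_X86_64 works
 * out to 0xc000003e, the architecture value user-space audit tooling
 * sees in audit records for 64-bit little-endian x86 syscalls.
 */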
	.code64

/*
 * Some macros to hide the most frequently occurring CFI annotations.
 */
	.macro pushq_cfi reg
	pushq \reg
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro popq_cfi reg
	popq \reg
	CFI_ADJUST_CFA_OFFSET -8
	.endm

	.macro movq_cfi reg offset=0
	movq %\reg, \offset(%rsp)
	CFI_REL_OFFSET \reg, \offset
	.endm

	.macro movq_cfi_restore offset reg
	movq \offset(%rsp), %\reg
	CFI_RESTORE \reg
	.endm
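/*
 * Illustration (editorial, derived from the macros above): a use such as
 *
 *	movq_cfi rbx, RBX+8
 *
 * expands to the store plus its matching unwind annotation:
 *
 *	movq %rbx, RBX+8(%rsp)
 *	CFI_REL_OFFSET rbx, RBX+8
 *
 * so the register save and the dwarf2 bookkeeping can never drift apart.
 */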
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
	retq
END(mcount)

ENTRY(ftrace_caller)
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
	retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace

.globl ftrace_stub
ftrace_stub:
	retq

trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi
	subq $MCOUNT_INSN_SIZE, %rdi

	call *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
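/*
 * Background note (editorial): with gcc's -pg profiling, every traced
 * function begins with a "call mcount".  That is why the code above
 * computes the traced function's instruction pointer as the return
 * address on the stack (0x38(%rsp)) minus MCOUNT_INSN_SIZE, and fetches
 * the caller's address from the saved frame pointer at 8(%rbp) before
 * handing both to the tracer.
 */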
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */

	.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc 1f
	TRACE_IRQS_ON
1:
#endif
	.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every
 * time a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp: at FRAMEEND */
	.macro FIXUP_TOP_OF_STACK tmp offset=0
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP+\offset(%rsp)
	movq $__USER_DS,SS+\offset(%rsp)
	movq $__USER_CS,CS+\offset(%rsp)
	movq $-1,RCX+\offset(%rsp)
	movq R11+\offset(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS+\offset(%rsp)
	.endm

	.macro RESTORE_TOP_OF_STACK tmp offset=0
	movq RSP+\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS+\offset(%rsp),\tmp
	movq \tmp,R11+\offset(%rsp)
	.endm
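/*
 * Typical pairing (editorial; the pattern is taken from stub_execve
 * later in this file): a ptregs system call wraps its C helper like
 *
 *	SAVE_REST
 *	FIXUP_TOP_OF_STACK %r11
 *	call sys_execve		# may inspect or modify pt_regs
 *	RESTORE_TOP_OF_STACK %r11
 *	RESTORE_REST
 *
 * so the C code always sees a fully defined hardware frame.
 */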
	.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET ss,0*/
	pushq %rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rsp,0
	pushq $(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip,0
	pushq %rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET 8
	.endm

	.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET -(6*8)
	.endm
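/*
 * Editorial summary of the resulting layout (six quadwords, matching the
 * 8*6 popped by UNFAKE_STACK_FRAME), highest address first:
 *
 *	SS       = __KERNEL_DS
 *	RSP      = 0
 *	EFLAGS   = 1<<9 (interrupts on)
 *	CS       = __KERNEL_CS
 *	RIP      = \child_rip
 *	ORIG_RAX = 0
 *
 * i.e. the hardware interrupt frame plus the orig_rax slot, which lets
 * kernel_thread() and kernel_execve() below reuse the normal return paths.
 */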
/*
 * initial frame state for a bare frame (no registers saved yet)
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, SS+8+\offset-RIP
	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
	CFI_REL_OFFSET rsp, RSP+\offset-RIP
	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
	CFI_REL_OFFSET rip, RIP+\offset-RIP
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, RIP+\offset-ORIG_RAX
	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
	.endm

/*
 * frame that enables calling into C.
 */
	.macro PARTIAL_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	PARTIAL_FRAME \start, R11+\offset-R15
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm
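/*
 * Editorial note: these macros nest, each layering more register
 * annotations onto the previous frame state:
 *
 *	EMPTY_FRAME -> INTR_FRAME -> XCPT_FRAME -> PARTIAL_FRAME -> DEFAULT_FRAME
 *
 * DEFAULT_FRAME therefore describes a complete pt_regs, which is what the
 * error_entry/save_paranoid paths below hand to their C handlers.
 */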
/* save partial stack frame */
ENTRY(save_args)
	XCPT_FRAME
	cld
	movq_cfi rdi, RDI+16-ARGOFFSET
	movq_cfi rsi, RSI+16-ARGOFFSET
	movq_cfi rdx, RDX+16-ARGOFFSET
	movq_cfi rcx, RCX+16-ARGOFFSET
	movq_cfi rax, RAX+16-ARGOFFSET
	movq_cfi r8, R8+16-ARGOFFSET
	movq_cfi r9, R9+16-ARGOFFSET
	movq_cfi r10, R10+16-ARGOFFSET
	movq_cfi r11, R11+16-ARGOFFSET
	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
	movq_cfi rbp, 8			/* push %rbp */
	leaq 8(%rsp), %rbp		/* mov %rsp, %rbp */
	testl $3, CS(%rdi)
	je 1f
	SWAPGS
	/*
	 * irqcount is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
1:	incl %gs:pda_irqcount
	jne 2f
	popq_cfi %rax			/* move return address... */
	mov %gs:pda_irqstackptr,%rsp
	EMPTY_FRAME 0
	pushq_cfi %rax			/* ... to the new stack */
	/*
	 * We entered an interrupt context - irqs are off:
	 */
2:	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC
END(save_args)

ENTRY(save_rest)
	PARTIAL_FRAME 1 REST_SKIP+8
	movq 5*8+16(%rsp), %r11		/* save return address */
	movq_cfi rbx, RBX+16
	movq_cfi rbp, RBP+16
	movq_cfi r12, R12+16
	movq_cfi r13, R13+16
	movq_cfi r14, R14+16
	movq_cfi r15, R15+16
	movq %r11, 8(%rsp)		/* return address */
	FIXUP_TOP_OF_STACK %r11, 16
	ret
	CFI_ENDPROC
END(save_rest)

/* save complete stack frame */
ENTRY(save_paranoid)
	XCPT_FRAME 1 RDI+8
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f				/* negative -> in kernel */
	SWAPGS
	xorl %ebx,%ebx
1:	ret
	CFI_ENDPROC
END(save_paranoid)
/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
	DEFAULT_FRAME
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,TI_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15, rbp, rbx are saved by C code and not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the
 * stack frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
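/*
 * Worked example (editorial, following the register table above): a
 * user-space write(1, buf, count) arrives here via SYSCALL with
 *
 *	rax = __NR_write, rdi = 1, rsi = buf, rdx = count,
 *	rcx = user RIP after the syscall insn, r11 = user RFLAGS,
 *
 * and %rsp still pointing at the *user* stack until it is switched below.
 */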
ENTRY(system_call)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,PDA_STACKOFFSET
	CFI_REGISTER rip,rcx
	/*CFI_REGISTER rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)
	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
	jnz tracesys
system_call_fastpath:
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	movq %gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx: work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
	bt $TIF_SYSCALL_AUDIT,%edx
	jc sysret_audit
#endif
	/* edx: work flags (arg3) */
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call do_notify_resume
	RESTORE_TOP_OF_STACK %r11
	RESTORE_REST
	movl $_TIF_WORK_MASK,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

#ifdef CONFIG_AUDITSYSCALL
	/*
	 * Fast path for syscall audit without full syscall trace.
	 * We just call audit_syscall_entry() directly, and then
	 * jump back to the normal fast path.
	 */
auditsys:
	movq %r10,%r9			/* 6th arg: 4th syscall arg */
	movq %rdx,%r8			/* 5th arg: 3rd syscall arg */
	movq %rsi,%rcx			/* 4th arg: 2nd syscall arg */
	movq %rdi,%rdx			/* 3rd arg: 1st syscall arg */
	movq %rax,%rsi			/* 2nd arg: syscall number */
	movl $AUDIT_ARCH_X86_64,%edi	/* 1st arg: audit arch */
	call audit_syscall_entry
	LOAD_ARGS 0			/* reload call-clobbered registers */
	jmp system_call_fastpath

	/*
	 * Return fast path for syscall audit. Call audit_syscall_exit()
	 * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
	 * masked off.
	 */
sysret_audit:
	movq %rax,%rsi		/* second arg, syscall return value */
	cmpq $0,%rax		/* is it < 0? */
	setl %al		/* 1 if so, 0 if not */
	movzbl %al,%edi		/* zero-extend that into %edi */
	inc %edi	/* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
	call audit_syscall_exit
	movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
	jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */
	/* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
	jz auditsys
#endif
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)	/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	/*
	 * Reload arg registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter() returned
	 * the value it wants us to use in the table lookup.
	 */
	LOAD_ARGS ARGOFFSET, 1
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
	.globl int_with_check
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	jnz int_careful
	andl $~TS_COMPAT,TI_status(%rcx)
	jmp retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx: work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $_TIF_WORK_SYSCALL_EXIT,%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls that need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
ENTRY(\label)
	PARTIAL_FRAME 1 8		/* offset 8: return address */
	subq $REST_SKIP, %rsp
	CFI_ADJUST_CFA_OFFSET REST_SKIP
	call save_rest
	DEFAULT_FRAME 0 8		/* offset 8: return address */
	leaq 8(%rsp), \arg		/* pt_regs pointer */
	call \func
	jmp ptregscall_common
	CFI_ENDPROC
END(\label)
	.endm
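/*
 * Illustration (editorial): the invocation "PTREGSCALL stub_clone,
 * sys_clone, %r8" below therefore emits a stub_clone entry that
 * completes the stack frame via save_rest, loads the pt_regs pointer
 * into %r8 (the fifth C argument, where sys_clone of this era expects
 * its struct pt_regs *), calls sys_clone, and returns through
 * ptregscall_common.
 */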
PTREGSCALL stub_clone, sys_clone, %r8
PTREGSCALL stub_fork, sys_fork, %rdi
PTREGSCALL stub_vfork, sys_vfork, %rdi
PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	DEFAULT_FRAME 1 8	/* offset 8: return address */
	RESTORE_TOP_OF_STACK %r11, 8
	movq_cfi_restore R15+8, r15
	movq_cfi_restore R14+8, r14
	movq_cfi_restore R13+8, r13
	movq_cfi_restore R12+8, r12
	movq_cfi_restore RBP+8, rbp
	movq_cfi_restore RBX+8, rbx
	ret $REST_SKIP		/* pop extended registers */
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
	.section .init.rodata,"a"
ENTRY(interrupt)
	.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq $(~vector+0x80)	/* Note: always in signed byte range */
	CFI_ADJUST_CFA_OFFSET 8
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.quad 1b
      .text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous
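/*
 * Worked example (editorial): each generated stub is just
 *
 *	pushq $(~vector+0x80)
 *	jmp   common_interrupt
 *
 * For vector 0x20, ~0x20+0x80 = 0x5f, which fits a sign-extended byte
 * push.  common_interrupt then adds -0x80, yielding -0x21, i.e. ~vector
 * again, so do_IRQ can recover the vector while every stub stays small
 * enough for seven of them to share one 32-byte chunk.
 */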
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the
 * fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	subq $10*8, %rsp
	CFI_ADJUST_CFA_OFFSET 10*8
	call save_args
	PARTIAL_FRAME 0
	call \func
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	addq $-0x80,(%rsp)	/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous
	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt $TIF_NEED_RESCHED,%edx
	jnc retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,TI_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,TI_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
	.p2align 5

	.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
	.endm
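/*
 * Illustration (editorial): an entry such as
 * "apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt" below
 * expands to a push of the one's-complemented vector, the shared
 * "interrupt" frame-building sequence, a call to the named C handler,
 * and the common ret_from_intr exit path.
 */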
ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(call_function_single_interrupt)
	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
END(call_function_single_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \sym
	jmp error_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
	.endm

	.macro paranoidzeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	call \sym
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
	.endm

	.macro paranoidzeroentry_ist sym ist
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1		/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi		/* pt_regs pointer */
	xorl %esi,%esi		/* no error code */
	movq %gs:pda_data_offset, %rbp
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	call \sym
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	jmp paranoid_exit	/* %ebx: no swapgs flag */
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \sym
	jmp error_exit			/* %ebx: no swapgs flag */
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	.macro paranoiderrorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid
	DEFAULT_FRAME 0
	TRACE_IRQS_OFF
	movq %rsp,%rdi			/* pt_regs pointer */
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	call \sym
	jmp paranoid_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
	.endm
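/*
 * Illustration (editorial): a CPU exception without a hardware error
 * code, e.g. #DE, is wired up later in this file simply as
 *
 *	ENTRY(divide_error)
 *		zeroentry do_divide_error
 *	END(divide_error)
 *
 * The macro pushes a fake ORIG_RAX of -1, builds the full pt_regs via
 * error_entry, and calls do_divide_error(regs, 0).
 */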
/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * the NMI should be common, and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */

	/* ebx: no swapgs flag */
KPROBE_ENTRY(paranoid_exit)
	INTR_FRAME
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore
	testl $3,CS(%rsp)
	jnz paranoid_userspace
paranoid_swapgs:
	TRACE_IRQS_IRETQ 0
	SWAPGS_UNSAFE_STACK
paranoid_restore:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
paranoid_schedule:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	jmp paranoid_userspace
	CFI_ENDPROC
END(paranoid_exit)
/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * It returns the "no swapgs" flag in %ebx.
 */
KPROBE_ENTRY(error_entry)
	XCPT_FRAME
	CFI_ADJUST_CFA_OFFSET 15*8
	/* oldrax contains error code */
	cld
	movq_cfi rdi, RDI+8
	movq_cfi rsi, RSI+8
	movq_cfi rdx, RDX+8
	movq_cfi rcx, RCX+8
	movq_cfi rax, RAX+8
	movq_cfi r8, R8+8
	movq_cfi r9, R9+8
	movq_cfi r10, R10+8
	movq_cfi r11, R11+8
	movq_cfi rbx, RBX+8
	movq_cfi rbp, RBP+8
	movq_cfi r12, R12+8
	movq_cfi r13, R13+8
	movq_cfi r14, R14+8
	movq_cfi r15, R15+8
	xorl %ebx,%ebx
	testl $3,CS+8(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	TRACE_IRQS_OFF
	ret
	CFI_ENDPROC

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
	incl %ebx
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	movl %ecx,%ecx		/* zero extend */
	cmpq %rcx,RIP+8(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP+8(%rsp)
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
KPROBE_ENTRY(error_exit)
	DEFAULT_FRAME
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl TI_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC
KPROBE_END(error_exit)
/* Reload gs selector with exception handling */
/* edi: new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence			/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(native_load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
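/*
 * Usage sketch (editorial; thread_fn and cookie are made-up names):
 *
 *	static int thread_fn(void *cookie) { ... return 0; }
 *	...
 *	kernel_thread(thread_fn, cookie, CLONE_FS | CLONE_FILES);
 *
 * The caller's flags are ORed below with kernel_thread_flags before
 * do_fork runs.
 */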
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all
 * state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry do_device_not_available
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	paranoidzeroentry_ist do_debug, DEBUG_STACK
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1
	subq $15*8, %rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call save_paranoid
	DEFAULT_FRAME 0
	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
	movq %rsp,%rdi
	movq $-1,%rsi
	call do_nmi
#ifdef CONFIG_TRACE_IRQFLAGS
	/* paranoidexit; without TRACE_IRQS_OFF */
	/* ebx: no swapgs flag */
	DISABLE_INTERRUPTS(CLBR_NONE)
	testl %ebx,%ebx			/* swapgs needed? */
	jnz nmi_restore
	testl $3,CS(%rsp)
	jnz nmi_userspace
nmi_swapgs:
	SWAPGS_UNSAFE_STACK
nmi_restore:
	RESTORE_ALL 8
	jmp irq_return
nmi_userspace:
	GET_THREAD_INFO(%rcx)
	movl TI_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz nmi_swapgs
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz nmi_schedule
	movl %ebx,%edx			/* arg3: thread flags */
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	jmp nmi_userspace
nmi_schedule:
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	jmp nmi_userspace
	CFI_ENDPROC
#else
	jmp paranoid_exit
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	paranoidzeroentry_ist do_int3, DEBUG_STACK
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	paranoiderrorentry do_double_fault
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	paranoiderrorentry do_stack_segment
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	paranoidzeroentry do_machine_check
END(machine_check)
#endif
	/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp		# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)
#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	zeroentry xen_do_hypervisor_callback
END(xen_hypervisor_callback)

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)	# do_hypervisor_callback(struct *pt_regs)
	CFI_STARTPROC
	/*
	 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct *pt_regs)
	 * will see the correct pointer to the pt_regs
	 */
	movq %rdi, %rsp		# we don't return, adjust the stack frame
	CFI_ENDPROC
	DEFAULT_FRAME
11:	incl %gs:pda_irqcount
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	cmovzq %gs:pda_irqstackptr,%rsp
	pushq %rbp		# backlink for old unwinder
	call xen_evtchn_do_upcall
	popq %rsp
	CFI_DEF_CFA_REGISTER rsp
	decl %gs:pda_irqcount
	jmp error_exit
	CFI_ENDPROC
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	INTR_FRAME 1 (6*8)
	/*CFI_REL_OFFSET gs,GS*/
	/*CFI_REL_OFFSET fs,FS*/
	/*CFI_REL_OFFSET es,ES*/
	/*CFI_REL_OFFSET ds,DS*/
	CFI_REL_OFFSET r11,8
	CFI_REL_OFFSET rcx,0
	movw %ds,%cx
	cmpw %cx,0x10(%rsp)
	CFI_REMEMBER_STATE
	jne 1f
	movw %es,%cx
	cmpw %cx,0x18(%rsp)
	jne 1f
	movw %fs,%cx
	cmpw %cx,0x20(%rsp)
	jne 1f
	movw %gs,%cx
	cmpw %cx,0x28(%rsp)
	jne 1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0	/* RIP */
	pushq_cfi %r11
	pushq_cfi %rcx
	jmp general_protection
	CFI_RESTORE_STATE
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq (%rsp),%rcx
	CFI_RESTORE rcx
	movq 8(%rsp),%r11
	CFI_RESTORE r11
	addq $0x30,%rsp
	CFI_ADJUST_CFA_OFFSET -0x30
	pushq_cfi $0
	SAVE_ALL
	jmp error_exit
	CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */