/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
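/*
 * Illustrative only (added for exposition, not part of the build): the
 * resulting kernel-stack layout, highest address first, using the pt_regs
 * field names:
 *
 *        SS, RSP, EFLAGS, CS, RIP        <- "top of stack" (hardware frame)
 *        ORIG_RAX
 *        RDI ... R11                     <- partial stack frame (SAVE_ARGS)
 *        RBX ... R15                     <- SAVE_REST adds these for a full frame
 */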
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/ftrace.h>
#include <asm/percpu.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT 0x80000000
#define __AUDIT_ARCH_LE 0x40000000
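/* Illustrative: with EM_X86_64 == 62, AUDIT_ARCH_X86_64 evaluates to 0xc000003e. */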
        .code64

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)
        retq
END(mcount)

ENTRY(ftrace_caller)
        cmpl $0, function_trace_stop
        jne ftrace_stub

        MCOUNT_SAVE_FRAME

        movq 0x38(%rsp), %rdi
        movq 8(%rbp), %rsi
        subq $MCOUNT_INSN_SIZE, %rdi

.globl ftrace_call
ftrace_call:
        call ftrace_stub

        MCOUNT_RESTORE_FRAME

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
        jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
        retq
END(ftrace_caller)

#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
        cmpl $0, function_trace_stop
        jne ftrace_stub

        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        cmpq $ftrace_stub, ftrace_graph_return
        jnz ftrace_graph_caller

        cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
        jnz ftrace_graph_caller
#endif

.globl ftrace_stub
ftrace_stub:
        retq

trace:
        MCOUNT_SAVE_FRAME

        movq 0x38(%rsp), %rdi
        movq 8(%rbp), %rsi
        subq $MCOUNT_INSN_SIZE, %rdi

        call *ftrace_trace_function

        MCOUNT_RESTORE_FRAME

        jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
        cmpl $0, function_trace_stop
        jne ftrace_stub

        MCOUNT_SAVE_FRAME

        leaq 8(%rbp), %rdi
        movq 0x38(%rsp), %rsi
        subq $MCOUNT_INSN_SIZE, %rsi

        call prepare_ftrace_return

        MCOUNT_RESTORE_FRAME

        retq
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
        subq $80, %rsp

        movq %rax, (%rsp)
        movq %rcx, 8(%rsp)
        movq %rdx, 16(%rsp)
        movq %rsi, 24(%rsp)
        movq %rdi, 32(%rsp)
        movq %r8, 40(%rsp)
        movq %r9, 48(%rsp)
        movq %r10, 56(%rsp)
        movq %r11, 64(%rsp)

        call ftrace_return_to_handler

        movq %rax, 72(%rsp) /* real return address, used by retq below */
        movq 64(%rsp), %r11
        movq 56(%rsp), %r10
        movq 48(%rsp), %r9
        movq 40(%rsp), %r8
        movq 32(%rsp), %rdi
        movq 24(%rsp), %rsi
        movq 16(%rsp), %rdx
        movq 8(%rsp), %rcx
        movq (%rsp), %rax
        addq $72, %rsp
        retq
#endif

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
        swapgs
        sysretq
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
        jnc 1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
/* %rsp:at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp offset=0
        movq PER_CPU_VAR(old_rsp),\tmp
        movq \tmp,RSP+\offset(%rsp)
        movq $__USER_DS,SS+\offset(%rsp)
        movq $__USER_CS,CS+\offset(%rsp)
        movq $-1,RCX+\offset(%rsp)
        movq R11+\offset(%rsp),\tmp /* get eflags */
        movq \tmp,EFLAGS+\offset(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp offset=0
        movq RSP+\offset(%rsp),\tmp
        movq \tmp,PER_CPU_VAR(old_rsp)
        movq EFLAGS+\offset(%rsp),\tmp
        movq \tmp,R11+\offset(%rsp)
.endm
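/*
 * Illustrative only, mirroring the pattern stub_execve uses below;
 * "some_ptregs_func" is a hypothetical C handler taking a pt_regs pointer.
 */
#if 0
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11         /* make RSP/SS/CS/EFLAGS/RCX in pt_regs valid */
        movq %rsp, %rdi                 /* pt_regs pointer as arg1 */
        call some_ptregs_func
        RESTORE_TOP_OF_STACK %r11       /* sync any ptregs changes back to syscall state */
        RESTORE_REST
#endif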
.macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl %eax, %eax
        pushq $__KERNEL_DS /* ss */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET ss,0*/
        pushq %rax /* rsp */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rsp,0
        pushq $X86_EFLAGS_IF /* eflags - interrupts on */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET rflags,0*/
        pushq $__KERNEL_CS /* cs */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET cs,0*/
        pushq \child_rip /* rip */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip,0
        pushq %rax /* orig rax */
        CFI_ADJUST_CFA_OFFSET 8
.endm

.macro UNFAKE_STACK_FRAME
        addq $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET -(6*8)
.endm
/*
 * initial frame state for interrupts (and exceptions without error code)
 */
.macro EMPTY_FRAME start=1 offset=0
        .if \start
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,8+\offset
        .else
        CFI_DEF_CFA_OFFSET 8+\offset
        .endif
.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
.macro INTR_FRAME start=1 offset=0
        EMPTY_FRAME \start, SS+8+\offset-RIP
        /*CFI_REL_OFFSET ss, SS+\offset-RIP*/
        CFI_REL_OFFSET rsp, RSP+\offset-RIP
        /*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
        /*CFI_REL_OFFSET cs, CS+\offset-RIP*/
        CFI_REL_OFFSET rip, RIP+\offset-RIP
.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
.macro XCPT_FRAME start=1 offset=0
        INTR_FRAME \start, RIP+\offset-ORIG_RAX
        /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
.endm

/*
 * frame that enables calling into C.
 */
.macro PARTIAL_FRAME start=1 offset=0
        XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
        CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
        CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
        CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
        CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
        CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
        CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
        CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
        CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
        CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
.macro DEFAULT_FRAME start=1 offset=0
        PARTIAL_FRAME \start, R11+\offset-R15
        CFI_REL_OFFSET rbx, RBX+\offset
        CFI_REL_OFFSET rbp, RBP+\offset
        CFI_REL_OFFSET r12, R12+\offset
        CFI_REL_OFFSET r13, R13+\offset
        CFI_REL_OFFSET r14, R14+\offset
        CFI_REL_OFFSET r15, R15+\offset
.endm
/* save partial stack frame */
ENTRY(save_args)
        XCPT_FRAME
        cld
        movq_cfi rdi, RDI+16-ARGOFFSET
        movq_cfi rsi, RSI+16-ARGOFFSET
        movq_cfi rdx, RDX+16-ARGOFFSET
        movq_cfi rcx, RCX+16-ARGOFFSET
        movq_cfi rax, RAX+16-ARGOFFSET
        movq_cfi r8, R8+16-ARGOFFSET
        movq_cfi r9, R9+16-ARGOFFSET
        movq_cfi r10, R10+16-ARGOFFSET
        movq_cfi r11, R11+16-ARGOFFSET
        leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
        movq_cfi rbp, 8         /* push %rbp */
        leaq 8(%rsp), %rbp      /* mov %rsp, %rbp */
        testl $3, CS(%rdi)
        je 1f
        SWAPGS
        /*
         * irq_count is used to check if a CPU is already on an interrupt stack
         * or not. While this is essentially redundant with preempt_count it is
         * a little cheaper to use a separate counter in the PDA (short of
         * moving irq_enter into assembly, which would be too much work)
         */
1:      incl PER_CPU_VAR(irq_count)
        jne 2f
        popq_cfi %rax                   /* move return address... */
        mov PER_CPU_VAR(irq_stack_ptr),%rsp
        EMPTY_FRAME 0
        pushq_cfi %rax                  /* ... to the new stack */
        /*
         * We entered an interrupt context - irqs are off:
         */
2:      TRACE_IRQS_OFF
        ret
        CFI_ENDPROC
END(save_args)

ENTRY(save_rest)
        PARTIAL_FRAME 1 REST_SKIP+8
        movq 5*8+16(%rsp), %r11 /* save return address */
        movq_cfi rbx, RBX+16
        movq_cfi rbp, RBP+16
        movq_cfi r12, R12+16
        movq_cfi r13, R13+16
        movq_cfi r14, R14+16
        movq_cfi r15, R15+16
        movq %r11, 8(%rsp)      /* return address */
        FIXUP_TOP_OF_STACK %r11, 16
        ret
        CFI_ENDPROC
END(save_rest)
/* save complete stack frame */
ENTRY(save_paranoid)
        XCPT_FRAME 1 RDI+8
        cld
        movq_cfi rdi, RDI+8
        movq_cfi rsi, RSI+8
        movq_cfi rdx, RDX+8
        movq_cfi rcx, RCX+8
        movq_cfi rax, RAX+8
        movq_cfi r8, R8+8
        movq_cfi r9, R9+8
        movq_cfi r10, R10+8
        movq_cfi r11, R11+8
        movq_cfi rbx, RBX+8
        movq_cfi rbp, RBP+8
        movq_cfi r12, R12+8
        movq_cfi r13, R13+8
        movq_cfi r14, R14+8
        movq_cfi r15, R15+8
        movl $1,%ebx
        movl $MSR_GS_BASE,%ecx
        rdmsr
        testl %edx,%edx
        js 1f   /* negative -> in kernel */
        SWAPGS
        xorl %ebx,%ebx
1:      ret
        CFI_ENDPROC
END(save_paranoid)
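/*
 * Note on the MSR_GS_BASE test above (added for exposition): the kernel's
 * per-CPU GS base lives in the upper, negative half of the canonical address
 * space, so after rdmsr the high 32 bits in %edx have the sign bit set; a
 * user GS base is a user-space address and does not.
 */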
/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
        DEFAULT_FRAME

        push kernel_eflags(%rip)
        CFI_ADJUST_CFA_OFFSET 8
        popf    # reset kernel eflags
        CFI_ADJUST_CFA_OFFSET -8

        call schedule_tail      # rdi: 'prev' task parameter

        GET_THREAD_INFO(%rcx)

        CFI_REMEMBER_STATE
        RESTORE_REST

        testl $3, CS-ARGOFFSET(%rsp)    # from kernel_thread?
        je int_ret_from_sys_call

        testl $_TIF_IA32, TI_flags(%rcx)        # 32-bit compat task needs IRET
        jnz int_ret_from_sys_call

        RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
        jmp ret_from_sys_call   # go to the SYSRET fastpath

        CFI_RESTORE_STATE
        CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3    (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX  if we had a free scratch register we could save the RSP into the stack
 *      frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frame, always force IRET: it deals with
 * non-canonical addresses better. SYSRET has trouble with them due to bugs
 * in both AMD and Intel CPUs.
 */
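/*
 * Illustrative only: what a user-space caller looks like under this
 * convention, e.g. write(1, buf, len); "buf" and "len" are hypothetical
 * symbols, and __NR_write is 1 on x86-64.
 */
#if 0
        movq $1, %rax                   /* system call number (__NR_write) */
        movq $1, %rdi                   /* arg0: fd */
        leaq buf(%rip), %rsi            /* arg1: buffer */
        movq $len, %rdx                 /* arg2: count */
        syscall                         /* clobbers rcx (RIP) and r11 (eflags) */
        /* result, or -errno, is now in %rax */
#endif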
ENTRY(system_call)
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER rip,rcx
        /*CFI_REGISTER rflags,r11*/
        SWAPGS_UNSAFE_STACK
        /*
         * A hypervisor implementation might want to use a label
         * after the swapgs, so that it can do the swapgs
         * for the guest and jump here on syscall.
         */
ENTRY(system_call_after_swapgs)

        movq %rsp,PER_CPU_VAR(old_rsp)
        movq PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_ARGS 8,1
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
        jnz tracesys
system_call_fastpath:
        cmpq $__NR_syscall_max,%rax
        ja badsys
        movq %r10,%rcx
        call *sys_call_table(,%rax,8)   # XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
sysret_check:
        LOCKDEP_SYS_EXIT
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        jnz sysret_careful
        CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
         */
        TRACE_IRQS_ON
        movq RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER rflags,r11*/
        movq PER_CPU_VAR(old_rsp), %rsp
        USERGS_SYSRET64

        CFI_RESTORE_STATE
        /* Handle reschedules */
        /* edx: work, edi: workmask */
sysret_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        jmp sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
#ifdef CONFIG_AUDITSYSCALL
        bt $TIF_SYSCALL_AUDIT,%edx
        jc sysret_audit
#endif
        /* edx: work flags (arg3) */
        leaq -ARGOFFSET(%rsp),%rdi      # &pt_regs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call do_notify_resume
        RESTORE_TOP_OF_STACK %r11
        RESTORE_REST
        movl $_TIF_WORK_MASK,%edi
        /* Use IRET because user could have changed frame. This
           works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check

badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call
#ifdef CONFIG_AUDITSYSCALL
        /*
         * Fast path for syscall audit without full syscall trace.
         * We just call audit_syscall_entry() directly, and then
         * jump back to the normal fast path.
         */
auditsys:
        movq %r10,%r9                   /* 6th arg: 4th syscall arg */
        movq %rdx,%r8                   /* 5th arg: 3rd syscall arg */
        movq %rsi,%rcx                  /* 4th arg: 2nd syscall arg */
        movq %rdi,%rdx                  /* 3rd arg: 1st syscall arg */
        movq %rax,%rsi                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_X86_64,%edi    /* 1st arg: audit arch */
        call audit_syscall_entry
        LOAD_ARGS 0                     /* reload call-clobbered registers */
        jmp system_call_fastpath

        /*
         * Return fast path for syscall audit. Call audit_syscall_exit()
         * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
         * masked off.
         */
sysret_audit:
        movq %rax,%rsi          /* second arg: syscall return value */
        cmpq $0,%rax            /* is it < 0? */
        setl %al                /* 1 if so, 0 if not */
        movzbl %al,%edi         /* zero-extend that into %edi */
        inc %edi /* first arg: 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
        call audit_syscall_exit
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        jmp sysret_check
#endif /* CONFIG_AUDITSYSCALL */
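/*
 * Worked example for sysret_audit above (added for exposition): if the
 * syscall returned -ENOENT, %rax is negative, so setl stores 1 and inc makes
 * it 2 (AUDITSC_FAILURE); a zero or positive return yields 1 (AUDITSC_SUCCESS).
 */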
        /* Do syscall tracing */
tracesys:
#ifdef CONFIG_AUDITSYSCALL
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
        jz auditsys
#endif
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
        FIXUP_TOP_OF_STACK %rdi
        movq %rsp,%rdi
        call syscall_trace_enter
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * We don't reload %rax because syscall_trace_enter() returned
         * the value it wants us to use in the table lookup.
         */
        LOAD_ARGS ARGOFFSET, 1
        RESTORE_REST
        cmpq $__NR_syscall_max,%rax
        ja int_ret_from_sys_call        /* RAX(%rsp) set to -ENOSYS above */
        movq %r10,%rcx                  /* fixup for C */
        call *sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
        .globl int_ret_from_sys_call
        .globl int_with_check
int_ret_from_sys_call:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
int_with_check:
        LOCKDEP_SYS_EXIT_IRQ
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        jnz int_careful
        andl $~TS_COMPAT,TI_status(%rcx)
        jmp retint_swapgs

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
int_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc int_very_careful
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_REST
        /* Check for syscall exit trace */
        testl $_TIF_WORK_SYSCALL_EXIT,%edx
        jz int_signal
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
        jmp int_restore_rest

int_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz 1f
        movq %rsp,%rdi          # &ptregs -> arg1
        xorl %esi,%esi          # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_WORK_MASK,%edi
int_restore_rest:
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls need to save a complete full stack frame.
 */
.macro PTREGSCALL label,func,arg
ENTRY(\label)
        PARTIAL_FRAME 1 8               /* offset 8: return address */
        subq $REST_SKIP, %rsp
        CFI_ADJUST_CFA_OFFSET REST_SKIP
        call save_rest
        DEFAULT_FRAME 0 8               /* offset 8: return address */
        leaq 8(%rsp), \arg      /* pt_regs pointer */
        call \func
        jmp ptregscall_common
        CFI_ENDPROC
END(\label)
.endm
PTREGSCALL stub_clone, sys_clone, %r8
PTREGSCALL stub_fork, sys_fork, %rdi
PTREGSCALL stub_vfork, sys_vfork, %rdi
PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
PTREGSCALL stub_iopl, sys_iopl, %rsi
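/*
 * Illustrative only: roughly what the stub_clone invocation above expands
 * to, with the CFI annotations omitted.
 */
#if 0
ENTRY(stub_clone)
        subq $REST_SKIP, %rsp           /* room for rbx..r15 */
        call save_rest                  /* complete the frame, fix top of stack */
        leaq 8(%rsp), %r8               /* pt_regs pointer as sys_clone's 5th arg */
        call sys_clone
        jmp ptregscall_common           /* restore callee-saved regs and return */
END(stub_clone)
#endif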
ENTRY(ptregscall_common)
        DEFAULT_FRAME 1 8       /* offset 8: return address */
        RESTORE_TOP_OF_STACK %r11, 8
        movq_cfi_restore R15+8, r15
        movq_cfi_restore R14+8, r14
        movq_cfi_restore R13+8, r13
        movq_cfi_restore R12+8, r12
        movq_cfi_restore RBP+8, rbp
        movq_cfi_restore RBX+8, rbx
        ret $REST_SKIP          /* pop extended registers */
        CFI_ENDPROC
END(ptregscall_common)
ENTRY(stub_execve)
        CFI_STARTPROC
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        movq %rsp, %rcx
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        CFI_ADJUST_CFA_OFFSET -8
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp)     # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
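/*
 * Worked example (added for exposition): with FIRST_EXTERNAL_VECTOR == 0x20
 * and NR_VECTORS == 256 there are 224 stubs, emitted as (224+6)/7 == 32
 * chunks of 7, each chunk aligned to a 32-byte boundary.
 */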
        .section .init.rodata,"a"
ENTRY(interrupt)
        .text
        .p2align 5
        .p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
        INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
        .balign 32
  .rept 7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
        CFI_ADJUST_CFA_OFFSET -8
      .endif
1:      pushq $(~vector+0x80)   /* Note: always in signed byte range */
        CFI_ADJUST_CFA_OFFSET 8
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
        jmp 2f
      .endif
      .previous
        .quad 1b
      .text
vector=vector+1
    .endif
  .endr
2:      jmp common_interrupt
.endr
        CFI_ENDPROC
END(irq_entries_start)
.previous
END(interrupt)
.previous
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
.macro interrupt func
        subq $10*8, %rsp
        CFI_ADJUST_CFA_OFFSET 10*8
        call save_args
        PARTIAL_FRAME 0
        call \func
.endm

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_interrupt.
 */
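/*
 * Worked example (added for exposition): for vector 32 the stub pushes
 * ~32 + 0x80 = -33 + 128 = 95 (0x5f), which fits in a signed byte, so the
 * pushq encodes in two bytes. common_interrupt then adds -0x80, leaving
 * ~32 in orig_rax, and the C handler recovers the vector as ~orig_rax.
 */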
        .p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
        XCPT_FRAME
        addq $-0x80,(%rsp)              /* Adjust vector to [-256,-1] range */
        interrupt do_IRQ
        /* 0(%rsp): old_rsp-ARGOFFSET */
ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        decl PER_CPU_VAR(irq_count)
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
retint_check:
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz retint_careful

retint_swapgs:          /* return to user-space */
        /*
         * The iretq could re-enable interrupts:
         */
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_IRETQ
        SWAPGS
        jmp restore_args

retint_restore_args:    /* return to kernel space */
        DISABLE_INTERRUPTS(CLBR_ANY)
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 0,8,0

irq_return:
        INTERRUPT_RETURN

        .section __ex_table, "a"
        .quad irq_return, bad_iret
        .previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
        iretq

        .section __ex_table,"a"
        .quad native_iret, bad_iret
        .previous
#endif

        .section .fixup,"ax"
bad_iret:
        /*
         * The iret traps when the %cs or %ss being restored is bogus.
         * We've lost the original trap vector and error code.
         * #GPF is the most likely one to get for an invalid selector.
         * So pretend we completed the iret and took the #GPF in user mode.
         *
         * We are now running with the kernel GS after exception recovery.
         * But error_entry expects us to have user GS to match the user %cs,
         * so swap back.
         */
        pushq $0
        SWAPGS
        jmp general_protection
        .previous
        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc retint_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp retint_check

retint_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz retint_swapgs
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_REST
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi          # oldset
        movq %rsp,%rdi          # &pt_regs
        call do_notify_resume
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        jmp retint_with_reschedule

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx:  threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl $0,TI_preempt_count(%rcx)
        jnz retint_restore_args
        bt $TIF_NEED_RESCHED,TI_flags(%rcx)
        jnc retint_restore_args
        bt $9,EFLAGS-ARGOFFSET(%rsp)    /* interrupts off? */
        jnc retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
        INTR_FRAME
        pushq $~(\num)
        CFI_ADJUST_CFA_OFFSET 8
        interrupt \do_sym
        jmp ret_from_intr
        CFI_ENDPROC
END(\sym)
.endm
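/*
 * Illustrative only: "apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt
 * smp_apic_timer_interrupt" below expands to roughly (CFI omitted):
 */
#if 0
ENTRY(apic_timer_interrupt)
        pushq $~(LOCAL_TIMER_VECTOR)    /* complemented vector, as in the IRQ stubs */
        interrupt smp_apic_timer_interrupt /* save args, switch stack, call handler */
        jmp ret_from_intr
END(apic_timer_interrupt)
#endif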
#ifdef CONFIG_SMP
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
        irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt UV_BAU_MESSAGE \
        uv_bau_message_intr1 uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR \
        apic_timer_interrupt smp_apic_timer_interrupt

#ifdef CONFIG_SMP
apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
        invalidate_interrupt0 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
        invalidate_interrupt1 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
        invalidate_interrupt2 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
        invalidate_interrupt3 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
        invalidate_interrupt4 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
        invalidate_interrupt5 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
        invalidate_interrupt6 smp_invalidate_interrupt
apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
        invalidate_interrupt7 smp_invalidate_interrupt
#endif

apicinterrupt THRESHOLD_APIC_VECTOR \
        threshold_interrupt mce_threshold_interrupt
apicinterrupt THERMAL_APIC_VECTOR \
        thermal_interrupt smp_thermal_interrupt

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
        call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
        call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
        reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
        error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
        spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_PERF_COUNTERS
apicinterrupt LOCAL_PERF_VECTOR \
        perf_counter_interrupt smp_perf_counter_interrupt
#endif
/*
 * Exception entry points.
 */
.macro zeroentry sym do_sym
ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $15*8,%rsp
        CFI_ADJUST_CFA_OFFSET 15*8
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
        call \do_sym
        jmp error_exit          /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq $-1               /* ORIG_RAX: no syscall to restart */
        CFI_ADJUST_CFA_OFFSET 8
        subq $15*8, %rsp
        call save_paranoid
        TRACE_IRQS_OFF
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
        call \do_sym
        jmp paranoid_exit       /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq $-1               /* ORIG_RAX: no syscall to restart */
        CFI_ADJUST_CFA_OFFSET 8
        subq $15*8, %rsp
        call save_paranoid
        TRACE_IRQS_OFF
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
        PER_CPU(init_tss, %rbp)
        subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
        call \do_sym
        addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
        jmp paranoid_exit       /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

.macro errorentry sym do_sym
ENTRY(\sym)
        XCPT_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $15*8,%rsp
        CFI_ADJUST_CFA_OFFSET 15*8
        call error_entry
        DEFAULT_FRAME 0
        movq %rsp,%rdi                  /* pt_regs pointer */
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
        call \do_sym
        jmp error_exit                  /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm

        /* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
        XCPT_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $15*8,%rsp
        CFI_ADJUST_CFA_OFFSET 15*8
        call save_paranoid
        DEFAULT_FRAME 0
        TRACE_IRQS_OFF
        movq %rsp,%rdi                  /* pt_regs pointer */
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
        call \do_sym
        jmp paranoid_exit               /* %ebx: no swapgs flag */
        CFI_ENDPROC
END(\sym)
.endm
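/*
 * Illustrative only: roughly what "zeroentry divide_error do_divide_error"
 * below expands to, with the CFI annotations omitted.
 */
#if 0
ENTRY(divide_error)
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq $-1                       /* ORIG_RAX: no syscall to restart */
        subq $15*8, %rsp                /* room for the rest of pt_regs */
        call error_entry                /* save registers, swapgs if from user mode */
        movq %rsp, %rdi                 /* pt_regs pointer */
        xorl %esi, %esi                 /* no error code */
        call do_divide_error
        jmp error_exit
END(divide_error)
#endif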
zeroentry divide_error do_divide_error
zeroentry overflow do_overflow
zeroentry bounds do_bounds
zeroentry invalid_op do_invalid_op
zeroentry device_not_available do_device_not_available
paranoiderrorentry double_fault do_double_fault
zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
errorentry invalid_TSS do_invalid_TSS
errorentry segment_not_present do_segment_not_present
zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error
        /* Reload gs selector with exception handling */
        /* edi:  new selector */
ENTRY(native_load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
        DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
        SWAPGS
gs_change:
        movl %edi,%gs
2:      mfence          /* workaround */
        SWAPGS
        popf
        CFI_ADJUST_CFA_OFFSET -8
        ret
        CFI_ENDPROC
END(native_load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        SWAPGS                  /* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp 2b
        .previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *      extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *      rdi: fn, rsi: arg, rdx: flags
 */
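/*
 * Illustrative only: a kernel-mode caller following the asm input convention
 * above; "my_thread_fn" is hypothetical, and CLONE_KERNEL is assumed to be
 * the usual CLONE_FS|CLONE_FILES|CLONE_SIGHAND bundle.
 */
#if 0
        leaq my_thread_fn(%rip), %rdi   /* fn */
        xorl %esi, %esi                 /* arg = NULL */
        movq $CLONE_KERNEL, %rdx        /* flags */
        call kernel_thread
        /* %rax: pid of the new kernel thread, or a negative errno */
#endif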
ENTRY(kernel_thread)
        CFI_STARTPROC
        FAKE_STACK_FRAME $child_rip
        SAVE_ALL

        # rdi: flags, rsi: usp, rdx: will be &pt_regs
        movq %rdx,%rdi
        orq kernel_thread_flags(%rip),%rdi
        movq $-1, %rsi
        movq %rsp, %rdx

        xorl %r8d,%r8d
        xorl %r9d,%r9d

        # clone now
        call do_fork
        movq %rax,RAX(%rsp)
        xorl %edi,%edi

        /*
         * It isn't worth checking for a reschedule here,
         * so internally to the x86_64 port you can rely on kernel_thread()
         * not to reschedule the child before returning; this avoids the need
         * for hacks, for example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]
         */
        RESTORE_ALL
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
END(kernel_thread)
ENTRY(child_rip)
        pushq $0                # fake return address
        CFI_STARTPROC
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */
        movq %rdi, %rax
        movq %rsi, %rdi
        call *%rax
        # exit
        mov %eax, %edi
        call do_exit
        ud2                     # padding for call trace
        CFI_ENDPROC
END(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *       extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *      rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *      extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *      rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
        SAVE_ALL
        movq %rsp,%rcx
        call sys_execve
        movq %rax, RAX(%rsp)
        RESTORE_REST
        testq %rax,%rax
        je int_ret_from_sys_call
        RESTORE_ARGS
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
END(kernel_execve)
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        push %rbp
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp,0
        mov %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        incl PER_CPU_VAR(irq_count)
        cmove PER_CPU_VAR(irq_stack_ptr),%rsp /* switch stacks only on the outermost entry */
        push %rbp               # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
        decl PER_CPU_VAR(irq_count)
        ret
        CFI_ENDPROC
END(call_softirq)
#ifdef CONFIG_XEN
zeroentry xen_hypervisor_callback xen_do_hypervisor_callback

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        CFI_STARTPROC
        /*
         * Since we don't modify %rdi, xen_evtchn_do_upcall(struct *pt_regs)
         * will see the correct pointer to the pt_regs
         */
        movq %rdi, %rsp         # we don't return, adjust the stack frame
        CFI_ENDPROC
        DEFAULT_FRAME
11:     incl PER_CPU_VAR(irq_count)
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
        pushq %rbp              # backlink for old unwinder
        call xen_evtchn_do_upcall
        popq %rsp
        CFI_DEF_CFA_REGISTER rsp
        decl PER_CPU_VAR(irq_count)
        jmp error_exit
        CFI_ENDPROC
END(xen_do_hypervisor_callback)
/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
        INTR_FRAME 1 (6*8)
        /*CFI_REL_OFFSET gs,GS*/
        /*CFI_REL_OFFSET fs,FS*/
        /*CFI_REL_OFFSET es,ES*/
        /*CFI_REL_OFFSET ds,DS*/
        CFI_REL_OFFSET r11,8
        CFI_REL_OFFSET rcx,0
        movw %ds,%cx
        cmpw %cx,0x10(%rsp)
        CFI_REMEMBER_STATE
        jne 1f
        movw %es,%cx
        cmpw %cx,0x18(%rsp)
        jne 1f
        movw %fs,%cx
        cmpw %cx,0x20(%rsp)
        jne 1f
        movw %gs,%cx
        cmpw %cx,0x28(%rsp)
        jne 1f
        /* All segments match their saved values => Category 2 (Bad IRET). */
        movq (%rsp),%rcx
        CFI_RESTORE rcx
        movq 8(%rsp),%r11
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
        pushq_cfi $0    /* RIP */
        pushq_cfi %r11
        pushq_cfi %rcx
        jmp general_protection
        CFI_RESTORE_STATE
1:      /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
        movq (%rsp),%rcx
        CFI_RESTORE rcx
        movq 8(%rsp),%r11
        CFI_RESTORE r11
        addq $0x30,%rsp
        CFI_ADJUST_CFA_OFFSET -0x30
        pushq_cfi $0
        SAVE_ALL
        jmp error_exit
        CFI_ENDPROC
END(xen_failsafe_callback)

#endif /* CONFIG_XEN */
/*
 * Some functions should be protected against kprobes
 */
        .pushsection .kprobes.text, "ax"

paranoidzeroentry_ist debug do_debug DEBUG_STACK
paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
paranoiderrorentry stack_segment do_stack_segment
errorentry general_protection do_general_protection
errorentry page_fault do_page_fault
#ifdef CONFIG_X86_MCE
paranoidzeroentry machine_check do_machine_check
#endif

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
        /* ebx: no swapgs flag */
ENTRY(paranoid_exit)
        INTR_FRAME
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl %ebx,%ebx                 /* swapgs needed? */
        jnz paranoid_restore
        testl $3,CS(%rsp)
        jnz paranoid_userspace
paranoid_swapgs:
        TRACE_IRQS_IRETQ 0
        SWAPGS_UNSAFE_STACK
paranoid_restore:
        RESTORE_ALL 8
        jmp irq_return
paranoid_userspace:
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs
        movq %rsp,%rdi                  /* &pt_regs */
        call sync_regs
        movq %rax,%rsp                  /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule
        movl %ebx,%edx                  /* arg3: thread flags */
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp paranoid_userspace
paranoid_schedule:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
        call schedule
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        jmp paranoid_userspace
        CFI_ENDPROC
END(paranoid_exit)
/*
 * Exception entry point. This expects an error code/orig_rax on the stack.
 * Returns the "no swapgs" flag in %ebx.
 */
ENTRY(error_entry)
        XCPT_FRAME
        CFI_ADJUST_CFA_OFFSET 15*8
        /* oldrax contains error code */
        cld
        movq_cfi rdi, RDI+8
        movq_cfi rsi, RSI+8
        movq_cfi rdx, RDX+8
        movq_cfi rcx, RCX+8
        movq_cfi rax, RAX+8
        movq_cfi r8, R8+8
        movq_cfi r9, R9+8
        movq_cfi r10, R10+8
        movq_cfi r11, R11+8
        movq_cfi rbx, RBX+8
        movq_cfi rbp, RBP+8
        movq_cfi r12, R12+8
        movq_cfi r13, R13+8
        movq_cfi r14, R14+8
        movq_cfi r15, R15+8
        xorl %ebx,%ebx
        testl $3,CS+8(%rsp)
        je error_kernelspace
error_swapgs:
        SWAPGS
error_sti:
        TRACE_IRQS_OFF
        ret
        CFI_ENDPROC

/*
 * There are two places in the kernel that can potentially fault with
 * usergs. Handle them here. The exception handlers after iret run with
 * kernel gs again, so don't set the user space flag. B stepping K8s
 * sometimes report a truncated RIP for IRET exceptions returning to
 * compat mode. Check for these here too.
 */
error_kernelspace:
        incl %ebx
        leaq irq_return(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
        movl %ecx,%ecx          /* zero extend */
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
        cmpq $gs_change,RIP+8(%rsp)
        je error_swapgs
        jmp error_sti
END(error_entry)
        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
ENTRY(error_exit)
        DEFAULT_FRAME
        movl %ebx,%eax
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne retint_kernel
        LOCKDEP_SYS_EXIT_IRQ
        movl TI_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi
        andl %edi,%edx
        jnz retint_careful
        jmp retint_swapgs
        CFI_ENDPROC
END(error_exit)
        /* runs on exception stack */
ENTRY(nmi)
        INTR_FRAME
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1
        subq $15*8, %rsp
        CFI_ADJUST_CFA_OFFSET 15*8
        call save_paranoid
        DEFAULT_FRAME 0
        /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
        movq %rsp,%rdi
        movq $-1,%rsi
        call do_nmi
#ifdef CONFIG_TRACE_IRQFLAGS
        /* paranoidexit; without TRACE_IRQS_OFF */
        /* ebx: no swapgs flag */
        DISABLE_INTERRUPTS(CLBR_NONE)
        testl %ebx,%ebx                 /* swapgs needed? */
        jnz nmi_restore
        testl $3,CS(%rsp)
        jnz nmi_userspace
nmi_swapgs:
        SWAPGS_UNSAFE_STACK
nmi_restore:
        RESTORE_ALL 8
        jmp irq_return
nmi_userspace:
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz nmi_swapgs
        movq %rsp,%rdi                  /* &pt_regs */
        call sync_regs
        movq %rax,%rsp                  /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz nmi_schedule
        movl %ebx,%edx                  /* arg3: thread flags */
        ENABLE_INTERRUPTS(CLBR_NONE)
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
        DISABLE_INTERRUPTS(CLBR_NONE)
        jmp nmi_userspace
nmi_schedule:
        ENABLE_INTERRUPTS(CLBR_ANY)
        call schedule
        DISABLE_INTERRUPTS(CLBR_ANY)
        jmp nmi_userspace
        CFI_ENDPROC
#else
        jmp paranoid_exit
        CFI_ENDPROC
#endif
END(nmi)
ENTRY(ignore_sysret)
        CFI_STARTPROC
        mov $-ENOSYS,%eax
        sysret
        CFI_ENDPROC
END(ignore_sysret)

/*
 * End of kprobes section
 */
        .popsection