/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>

        .code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt      $9,EFLAGS-\offset(%rsp)         /* interrupts off? (IF is bit 9 of EFLAGS) */
        jnc     1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
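/*
 * Concretely: hardware SYSCALL only copied RIP into %rcx and RFLAGS into
 * %r11; it saved nothing on the stack. So the SS, CS, RSP, EFLAGS and RCX
 * slots of the partial pt_regs are garbage until FIXUP_TOP_OF_STACK below
 * fills them in from the PDA and from the saved %r11 value.
 */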
/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
        movq    %gs:pda_oldrsp,\tmp
        movq    \tmp,RSP(%rsp)
        movq    $__USER_DS,SS(%rsp)
        movq    $__USER_CS,CS(%rsp)
        movq    $-1,RCX(%rsp)
        movq    R11(%rsp),\tmp          /* get eflags */
        movq    \tmp,EFLAGS(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp,offset=0
        movq    RSP-\offset(%rsp),\tmp
        movq    \tmp,%gs:pda_oldrsp
        movq    EFLAGS-\offset(%rsp),\tmp
        movq    \tmp,R11-\offset(%rsp)
.endm

.macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl    %eax, %eax
        pushq   %rax                    /* ss */
        CFI_ADJUST_CFA_OFFSET   8
        /*CFI_REL_OFFSET        ss,0*/
        pushq   %rax                    /* rsp */
        CFI_ADJUST_CFA_OFFSET   8
        CFI_REL_OFFSET  rsp,0
        pushq   $(1<<9)                 /* eflags - interrupts on */
        CFI_ADJUST_CFA_OFFSET   8
        /*CFI_REL_OFFSET        rflags,0*/
        pushq   $__KERNEL_CS            /* cs */
        CFI_ADJUST_CFA_OFFSET   8
        /*CFI_REL_OFFSET        cs,0*/
        pushq   \child_rip              /* rip */
        CFI_ADJUST_CFA_OFFSET   8
        CFI_REL_OFFSET  rip,0
        pushq   %rax                    /* orig rax */
        CFI_ADJUST_CFA_OFFSET   8
.endm
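/*
 * The six words pushed above (ss, rsp, eflags, cs, rip, orig rax) are
 * exactly the $8*6 bytes that UNFAKE_STACK_FRAME below pops back off.
 */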
.macro UNFAKE_STACK_FRAME
        addq    $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET   -(6*8)
.endm

.macro CFI_DEFAULT_STACK start=1
        .if \start
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA     rsp,SS+8
        .else
        CFI_DEF_CFA_OFFSET SS+8
        .endif
        CFI_REL_OFFSET  r15,R15
        CFI_REL_OFFSET  r14,R14
        CFI_REL_OFFSET  r13,R13
        CFI_REL_OFFSET  r12,R12
        CFI_REL_OFFSET  rbp,RBP
        CFI_REL_OFFSET  rbx,RBX
        CFI_REL_OFFSET  r11,R11
        CFI_REL_OFFSET  r10,R10
        CFI_REL_OFFSET  r9,R9
        CFI_REL_OFFSET  r8,R8
        CFI_REL_OFFSET  rax,RAX
        CFI_REL_OFFSET  rcx,RCX
        CFI_REL_OFFSET  rdx,RDX
        CFI_REL_OFFSET  rsi,RSI
        CFI_REL_OFFSET  rdi,RDI
        CFI_REL_OFFSET  rip,RIP
        /*CFI_REL_OFFSET        cs,CS*/
        /*CFI_REL_OFFSET        rflags,EFLAGS*/
        CFI_REL_OFFSET  rsp,RSP
        /*CFI_REL_OFFSET        ss,SS*/
.endm

/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
        CFI_DEFAULT_STACK
        call    schedule_tail
        GET_THREAD_INFO(%rcx)
        testl   $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
        jnz     rff_trace
rff_action:
        RESTORE_REST
        testl   $3,CS-ARGOFFSET(%rsp)   # from kernel_thread?
        je      int_ret_from_sys_call
        testl   $_TIF_IA32,threadinfo_flags(%rcx)
        jnz     int_ret_from_sys_call
        RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
        jmp     ret_from_sys_call
rff_trace:
        movq    %rsp,%rdi
        call    syscall_trace_leave
        GET_THREAD_INFO(%rcx)
        jmp     rff_action
        CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3    (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX  if we had a free scratch register we could save the RSP into the stack
 *      frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
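/*
 * Illustrative only (user-space side, not part of this file): the ABI
 * described above is exercised roughly as
 *
 *      movq    $__NR_getpid,%rax       # syscall number
 *      syscall                         # CPU: %rcx <- return RIP, %r11 <- RFLAGS
 *      # result (or -errno) is now in %rax
 *
 * which is why %rcx and %r11 are unavailable for argument passing.
 */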
ENTRY(system_call)
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA     rsp,PDA_STACKOFFSET
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        swapgs
        movq    %rsp,%gs:pda_oldrsp
        movq    %gs:pda_kernelstack,%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        sti
        SAVE_ARGS 8,1
        movq    %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq    %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl   $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
        CFI_REMEMBER_STATE
        jnz     tracesys
        cmpq    $__NR_syscall_max,%rax
        ja      badsys
        movq    %r10,%rcx
        call    *sys_call_table(,%rax,8)        # XXX: rip relative
        movq    %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
        .globl ret_from_sys_call
ret_from_sys_call:
        movl    $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
sysret_check:
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        movl    threadinfo_flags(%rcx),%edx
        andl    %edi,%edx
        CFI_REMEMBER_STATE
        jnz     sysret_careful
        /*
         * sysretq will re-enable interrupts:
         */
        TRACE_IRQS_ON
        movq    RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER    rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER  rflags,r11*/
        movq    %gs:pda_oldrsp,%rsp
        swapgs
        sysretq

        /* Handle reschedules */
        /* edx: work, edi: workmask */
sysret_careful:
        CFI_RESTORE_STATE
        bt      $TIF_NEED_RESCHED,%edx
        jnc     sysret_signal
        TRACE_IRQS_ON
        sti
        pushq   %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call    schedule
        popq    %rdi
        CFI_ADJUST_CFA_OFFSET -8
        jmp     sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        sti
        testl   $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz      1f

        /* Really a signal */
        /* edx: work flags (arg3) */
        leaq    do_notify_resume(%rip),%rax
        leaq    -ARGOFFSET(%rsp),%rdi   # &pt_regs -> arg1
        xorl    %esi,%esi               # oldset -> arg2
        call    ptregscall_common
1:      movl    $_TIF_NEED_RESCHED,%edi
        /* Use IRET because the user could have changed the frame. This
           works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
        cli
        TRACE_IRQS_OFF
        jmp     int_with_check

badsys:
        movq    $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp     ret_from_sys_call

        /* Do syscall tracing */
tracesys:
        CFI_RESTORE_STATE
        SAVE_REST
        movq    $-ENOSYS,RAX(%rsp)
        FIXUP_TOP_OF_STACK %rdi
        movq    %rsp,%rdi
        call    syscall_trace_enter
        LOAD_ARGS ARGOFFSET     /* reload args from stack in case ptrace changed them */
        RESTORE_REST
        cmpq    $__NR_syscall_max,%rax
        ja      1f
        movq    %r10,%rcx       /* fixup for C */
        call    *sys_call_table(,%rax,8)
1:      movq    %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because the user could have changed the frame */
        jmp     int_ret_from_sys_call
        CFI_ENDPROC
END(system_call)
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA     rsp,SS+8-ARGOFFSET
        /*CFI_REL_OFFSET        ss,SS-ARGOFFSET*/
        CFI_REL_OFFSET  rsp,RSP-ARGOFFSET
        /*CFI_REL_OFFSET        rflags,EFLAGS-ARGOFFSET*/
        /*CFI_REL_OFFSET        cs,CS-ARGOFFSET*/
        CFI_REL_OFFSET  rip,RIP-ARGOFFSET
        CFI_REL_OFFSET  rdx,RDX-ARGOFFSET
        CFI_REL_OFFSET  rcx,RCX-ARGOFFSET
        CFI_REL_OFFSET  rax,RAX-ARGOFFSET
        CFI_REL_OFFSET  rdi,RDI-ARGOFFSET
        CFI_REL_OFFSET  rsi,RSI-ARGOFFSET
        CFI_REL_OFFSET  r8,R8-ARGOFFSET
        CFI_REL_OFFSET  r9,R9-ARGOFFSET
        CFI_REL_OFFSET  r10,R10-ARGOFFSET
        CFI_REL_OFFSET  r11,R11-ARGOFFSET
        cli
        TRACE_IRQS_OFF
        testl   $3,CS-ARGOFFSET(%rsp)
        je      retint_restore_args
        movl    $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
int_with_check:
        GET_THREAD_INFO(%rcx)
        movl    threadinfo_flags(%rcx),%edx
        andl    %edi,%edx
        jnz     int_careful
        andl    $~TS_COMPAT,threadinfo_status(%rcx)
        jmp     retint_swapgs

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
int_careful:
        bt      $TIF_NEED_RESCHED,%edx
        jnc     int_very_careful
        TRACE_IRQS_ON
        sti
        pushq   %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call    schedule
        popq    %rdi
        CFI_ADJUST_CFA_OFFSET -8
        cli
        TRACE_IRQS_OFF
        jmp     int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        sti
        SAVE_REST
        /* Check for syscall exit trace */
        testl   $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
        jz      int_signal
        pushq   %rdi
        CFI_ADJUST_CFA_OFFSET 8
        leaq    8(%rsp),%rdi    # &ptregs -> arg1
        call    syscall_trace_leave
        popq    %rdi
        CFI_ADJUST_CFA_OFFSET -8
        andl    $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
        cli
        TRACE_IRQS_OFF
        jmp     int_restore_rest

int_signal:
        testl   $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
        jz      1f
        movq    %rsp,%rdi       # &ptregs -> arg1
        xorl    %esi,%esi       # oldset -> arg2
        call    do_notify_resume
1:      movl    $_TIF_NEED_RESCHED,%edi
int_restore_rest:
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        jmp     int_with_check
        CFI_ENDPROC
END(int_ret_from_sys_call)

/*
 * Certain special system calls need to save a full stack frame.
 */
.macro PTREGSCALL label,func,arg
        .globl \label
\label:
        leaq    \func(%rip),%rax
        leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
        jmp     ptregscall_common
END(\label)
.endm

        CFI_STARTPROC

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
        popq    %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        movq    %r11, %r15
        CFI_REGISTER rip, r15
        FIXUP_TOP_OF_STACK %r11
        call    *%rax
        RESTORE_TOP_OF_STACK %r11
        movq    %r15, %r11
        CFI_REGISTER rip, r11
        RESTORE_REST
        pushq   %r11
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip, 0
        ret
        CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
        CFI_STARTPROC
        popq    %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call    sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq    %rax,RAX(%rsp)
        RESTORE_REST
        jmp     int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq    $8, %rsp
        CFI_ADJUST_CFA_OFFSET -8
        SAVE_REST
        movq    %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call    sys_rt_sigreturn
        movq    %rax,RAX(%rsp)  # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp     int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
.macro _frame ref
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8-\ref
        /*CFI_REL_OFFSET ss,SS-\ref*/
        CFI_REL_OFFSET rsp,RSP-\ref
        /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
        /*CFI_REL_OFFSET cs,CS-\ref*/
        CFI_REL_OFFSET rip,RIP-\ref
.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only the callee-clobbered registers in the
 * fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
.macro interrupt func
        cld
        SAVE_ARGS
        leaq    -ARGOFFSET(%rsp),%rdi   # arg1 for handler
        pushq   %rbp
        CFI_ADJUST_CFA_OFFSET   8
        CFI_REL_OFFSET          rbp, 0
        movq    %rsp,%rbp
        CFI_DEF_CFA_REGISTER    rbp
        testl   $3,CS(%rdi)
        je      1f
        swapgs
        /* irqcount is used to check if a CPU is already on an interrupt
           stack or not. While this is essentially redundant with preempt_count
           it is a little cheaper to use a separate counter in the PDA
           (short of moving irq_enter into assembly, which would be too
           much work) */
1:      incl    %gs:pda_irqcount
        cmoveq  %gs:pda_irqstackptr,%rsp
        push    %rbp                    # backlink for old unwinder
        /*
         * We entered an interrupt context - irqs are off:
         */
        TRACE_IRQS_OFF
        call    \func
.endm
ENTRY(common_interrupt)
        XCPT_FRAME
        interrupt do_IRQ
        /* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
        cli
        TRACE_IRQS_OFF
        decl    %gs:pda_irqcount
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
exit_intr:
        GET_THREAD_INFO(%rcx)
        testl   $3,CS-ARGOFFSET(%rsp)
        je      retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl    $_TIF_WORK_MASK,%edi
retint_check:
        movl    threadinfo_flags(%rcx),%edx
        andl    %edi,%edx
        CFI_REMEMBER_STATE
        jnz     retint_careful

retint_swapgs:
        /*
         * The iretq could re-enable interrupts:
         */
        cli
        TRACE_IRQS_IRETQ
        swapgs
        jmp     restore_args

retint_restore_args:
        cli
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 0,8,0
iret_label:
        iretq

        .section __ex_table,"a"
        .quad   iret_label,bad_iret
        .previous
        .section .fixup,"ax"
        /* force a signal here? this matches i386 behaviour */
        /* running with kernel gs */
bad_iret:
        movq    $11,%rdi        /* SIGSEGV */
        TRACE_IRQS_ON
        sti
        jmp     do_exit
        .previous

        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt      $TIF_NEED_RESCHED,%edx
        jnc     retint_signal
        TRACE_IRQS_ON
        sti
        pushq   %rdi
        CFI_ADJUST_CFA_OFFSET   8
        call    schedule
        popq    %rdi
        CFI_ADJUST_CFA_OFFSET   -8
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        jmp     retint_check

retint_signal:
        testl   $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz      retint_swapgs
        TRACE_IRQS_ON
        sti
        SAVE_REST
        movq    $-1,ORIG_RAX(%rsp)
        xorl    %esi,%esi       # oldset
        movq    %rsp,%rdi       # &pt_regs
        call    do_notify_resume
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        movl    $_TIF_NEED_RESCHED,%edi
        GET_THREAD_INFO(%rcx)
        jmp     retint_check

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl    $0,threadinfo_preempt_count(%rcx)
        jnz     retint_restore_args
        bt      $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
        jnc     retint_restore_args
        bt      $9,EFLAGS-ARGOFFSET(%rsp)       /* interrupts off? */
        jnc     retint_restore_args
        call    preempt_schedule_irq
        jmp     exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt num,func
        INTR_FRAME
        pushq   $~(\num)
        CFI_ADJUST_CFA_OFFSET 8
        interrupt \func
        jmp     ret_from_intr
        CFI_ENDPROC
.endm
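/*
 * Note: the vector is pushed bit-complemented ($~num), so a negative value
 * lands in the orig_rax slot and the C side undoes the complement to recover
 * the vector. Presumably the negative encoding also keeps it distinguishable
 * from a (non-negative) syscall number stored in the same slot.
 */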
ENTRY(thermal_interrupt)
        apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
        apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
        apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

        .macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
        apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
        .endm

        INVALIDATE_ENTRY 0
        INVALIDATE_ENTRY 1
        INVALIDATE_ENTRY 2
        INVALIDATE_ENTRY 3
        INVALIDATE_ENTRY 4
        INVALIDATE_ENTRY 5
        INVALIDATE_ENTRY 6
        INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
        apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
#endif

ENTRY(apic_timer_interrupt)
        apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
        apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
        apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
.macro zeroentry sym
        INTR_FRAME
        pushq   $0              /* push error code/oldrax */
        CFI_ADJUST_CFA_OFFSET 8
        pushq   %rax            /* push real oldrax to the rdi slot */
        CFI_ADJUST_CFA_OFFSET 8
        leaq    \sym(%rip),%rax
        jmp     error_entry
        CFI_ENDPROC
.endm

.macro errorentry sym
        XCPT_FRAME
        pushq   %rax
        CFI_ADJUST_CFA_OFFSET 8
        leaq    \sym(%rip),%rax
        jmp     error_entry
        CFI_ENDPROC
.endm

        /* error code is on the stack already */
        /* handle NMI-like exceptions that can happen everywhere */
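        /*
         * The macro below leaves a flag in %ebx for the exit paths:
         * 1 means the kernel GS base was already active (no swapgs needed
         * on exit), 0 means swapgs was done here and must be undone on the
         * way out.
         */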
.macro paranoidentry sym, ist=0, irqtrace=1
        SAVE_ALL
        cld
        movl    $1,%ebx
        movl    $MSR_GS_BASE,%ecx
        rdmsr
        testl   %edx,%edx
        js      1f
        swapgs
        xorl    %ebx,%ebx
1:
        .if \ist
        movq    %gs:pda_data_offset, %rbp
        .endif
        movq    %rsp,%rdi
        movq    ORIG_RAX(%rsp),%rsi
        movq    $-1,ORIG_RAX(%rsp)
        .if \ist
        subq    $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        call    \sym
        .if \ist
        addq    $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        cli
        .if \irqtrace
        TRACE_IRQS_OFF
        .endif
.endm

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
.macro paranoidexit trace=1
        /* ebx: no swapgs flag */
paranoid_exit\trace:
        testl   %ebx,%ebx               /* swapgs needed? */
        jnz     paranoid_restore\trace
        testl   $3,CS(%rsp)
        jnz     paranoid_userspace\trace
paranoid_swapgs\trace:
        .if \trace
        TRACE_IRQS_IRETQ 0
        .endif
        swapgs
paranoid_restore\trace:
        RESTORE_ALL 8
        iretq
paranoid_userspace\trace:
        GET_THREAD_INFO(%rcx)
        movl    threadinfo_flags(%rcx),%ebx
        andl    $_TIF_WORK_MASK,%ebx
        jz      paranoid_swapgs\trace
        movq    %rsp,%rdi               /* &pt_regs */
        call    sync_regs
        movq    %rax,%rsp               /* switch stack for scheduling */
        testl   $_TIF_NEED_RESCHED,%ebx
        jnz     paranoid_schedule\trace
        movl    %ebx,%edx               /* arg3: thread flags */
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        xorl    %esi,%esi               /* arg2: oldset */
        movq    %rsp,%rdi               /* arg1: &pt_regs */
        call    do_notify_resume
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp     paranoid_userspace\trace
paranoid_schedule\trace:
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        call    schedule
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp     paranoid_userspace\trace
        CFI_ENDPROC
.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
        _frame RDI
        /* rdi slot contains rax, oldrax contains error code */
        cld
        subq    $14*8,%rsp
        CFI_ADJUST_CFA_OFFSET   (14*8)
        movq    %rsi,13*8(%rsp)
        CFI_REL_OFFSET  rsi,RSI
        movq    14*8(%rsp),%rsi /* load rax from rdi slot */
        movq    %rdx,12*8(%rsp)
        CFI_REL_OFFSET  rdx,RDX
        movq    %rcx,11*8(%rsp)
        CFI_REL_OFFSET  rcx,RCX
        movq    %rsi,10*8(%rsp) /* store rax */
        CFI_REL_OFFSET  rax,RAX
        movq    %r8, 9*8(%rsp)
        CFI_REL_OFFSET  r8,R8
        movq    %r9, 8*8(%rsp)
        CFI_REL_OFFSET  r9,R9
        movq    %r10,7*8(%rsp)
        CFI_REL_OFFSET  r10,R10
        movq    %r11,6*8(%rsp)
        CFI_REL_OFFSET  r11,R11
        movq    %rbx,5*8(%rsp)
        CFI_REL_OFFSET  rbx,RBX
        movq    %rbp,4*8(%rsp)
        CFI_REL_OFFSET  rbp,RBP
        movq    %r12,3*8(%rsp)
        CFI_REL_OFFSET  r12,R12
        movq    %r13,2*8(%rsp)
        CFI_REL_OFFSET  r13,R13
        movq    %r14,1*8(%rsp)
        CFI_REL_OFFSET  r14,R14
        movq    %r15,(%rsp)
        CFI_REL_OFFSET  r15,R15
        xorl    %ebx,%ebx
        testl   $3,CS(%rsp)
        je      error_kernelspace
error_swapgs:
        swapgs
error_sti:
        movq    %rdi,RDI(%rsp)
        movq    %rsp,%rdi
        movq    ORIG_RAX(%rsp),%rsi     /* get error code */
        movq    $-1,ORIG_RAX(%rsp)
        call    *%rax
        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
        movl    %ebx,%eax
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl   %eax,%eax
        jne     retint_kernel
        movl    threadinfo_flags(%rcx),%edx
        movl    $_TIF_WORK_MASK,%edi
        andl    %edi,%edx
        jnz     retint_careful
        /*
         * The iret might restore flags:
         */
        TRACE_IRQS_IRETQ
        swapgs
        RESTORE_ARGS 0,8,0
        jmp     iret_label
        CFI_ENDPROC

error_kernelspace:
        incl    %ebx
        /* There are two places in the kernel that can potentially fault with
           usergs. Handle them here. The exception handlers after
           iret run with kernel gs again, so don't set the user space flag.
           B stepping K8s sometimes report a truncated RIP for IRET
           exceptions returning to compat mode. Check for these here too. */
        leaq    iret_label(%rip),%rbp
        cmpq    %rbp,RIP(%rsp)
        je      error_swapgs
        movl    %ebp,%ebp       /* zero extend */
        cmpq    %rbp,RIP(%rsp)
        je      error_swapgs
        cmpq    $gs_change,RIP(%rsp)
        je      error_swapgs
        jmp     error_sti
KPROBE_END(error_entry)

        /* Reload gs selector with exception handling */
        /* edi: new selector */
ENTRY(load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
        cli
        swapgs
gs_change:
        movl    %edi,%gs
2:      mfence          /* workaround */
        swapgs
        popf
        CFI_ADJUST_CFA_OFFSET -8
        ret
        CFI_ENDPROC
ENDPROC(load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad   gs_change,bad_gs
        .previous
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        swapgs                  /* switch back to user gs */
        xorl    %eax,%eax
        movl    %eax,%gs
        jmp     2b
        .previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *      extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *      rdi: fn, rsi: arg, rdx: flags
 */
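/*
 * Illustrative only (hypothetical caller, not part of this file):
 *      static int worker(void *arg) { ...; return 0; }
 *      long pid = kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);
 * The flags are ordinary clone() flags; which ones to pass is caller policy.
 */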
ENTRY(kernel_thread)
        CFI_STARTPROC
        FAKE_STACK_FRAME $child_rip
        SAVE_ALL

        # rdi: flags, rsi: usp, rdx: will be &pt_regs
        movq    %rdx,%rdi
        orq     kernel_thread_flags(%rip),%rdi
        movq    $-1, %rsi
        movq    %rsp, %rdx

        xorl    %r8d,%r8d
        xorl    %r9d,%r9d

        # clone now
        call    do_fork
        movq    %rax,RAX(%rsp)
        xorl    %edi,%edi

        /*
         * It isn't worth checking for a reschedule here,
         * so internally to the x86_64 port you can rely on kernel_thread()
         * not to reschedule the child before returning; this avoids the need
         * for hacks, for example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]
         */
        RESTORE_ALL
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
        pushq   $0              # fake return address
        CFI_STARTPROC
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */
        movq    %rdi, %rax
        movq    %rsi, %rdi
        call    *%rax
        # exit
        xorl    %edi, %edi
        call    do_exit
        CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *      extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *      rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *      extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *      rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
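/*
 * Illustrative only: in-kernel callers invoke this as a plain C call; the
 * init bootstrap, for instance, does roughly
 *      error = execve(init_filename, argv_init, envp_init);
 * Per the code below, a zero result (success) leaves via
 * int_ret_from_sys_call (IRET); a nonzero error code is handed straight
 * back to the caller.
 */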
ENTRY(execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
        SAVE_ALL
        call    sys_execve
        movq    %rax, RAX(%rsp)
        RESTORE_REST
        testq   %rax,%rax
        je      int_ret_from_sys_call
        RESTORE_ARGS
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(execve)

KPROBE_ENTRY(page_fault)
        errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
        zeroentry math_state_restore
END(device_not_available)

        /* runs on exception stack */
KPROBE_ENTRY(debug)
        INTR_FRAME
        pushq   $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_debug, DEBUG_STACK
        paranoidexit
KPROBE_END(debug)

        /* runs on exception stack */
KPROBE_ENTRY(nmi)
        INTR_FRAME
        pushq   $-1
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
        paranoidexit 0
#else
        jmp     paranoid_exit1
        CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
        INTR_FRAME
        pushq   $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_int3, DEBUG_STACK
        jmp     paranoid_exit1
        CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
        zeroentry do_overflow
END(overflow)

ENTRY(bounds)
        zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
        zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
        zeroentry do_reserved
END(reserved)

        /* runs on exception stack */
ENTRY(double_fault)
        XCPT_FRAME
        paranoidentry do_double_fault
        jmp     paranoid_exit1
        CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
        errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
        errorentry do_segment_not_present
END(segment_not_present)

        /* runs on exception stack */
ENTRY(stack_segment)
        XCPT_FRAME
        paranoidentry do_stack_segment
        jmp     paranoid_exit1
        CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
        errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
        errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
        zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
        /* runs on exception stack */
ENTRY(machine_check)
        INTR_FRAME
        pushq   $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_machine_check
        jmp     paranoid_exit1
        CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        push    %rbp
        CFI_ADJUST_CFA_OFFSET   8
        CFI_REL_OFFSET rbp,0
        mov     %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        incl    %gs:pda_irqcount
        cmove   %gs:pda_irqstackptr,%rsp
        push    %rbp            # backlink for old unwinder
        call    __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
        decl    %gs:pda_irqcount
        ret
        CFI_ENDPROC
ENDPROC(call_softirq)

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
        CFI_STARTPROC
        movq    %r15, R15(%rdi)
        movq    %r14, R14(%rdi)
        xchgq   %rsi, %rdx
        movq    %r13, R13(%rdi)
        movq    %r12, R12(%rdi)
        xorl    %eax, %eax
        movq    %rbp, RBP(%rdi)
        movq    %rbx, RBX(%rdi)
        movq    (%rsp), %rcx
        movq    %rax, R11(%rdi)
        movq    %rax, R10(%rdi)
        movq    %rax, R9(%rdi)
        movq    %rax, R8(%rdi)
        movq    %rax, RAX(%rdi)
        movq    %rax, RCX(%rdi)
        movq    %rax, RDX(%rdi)
        movq    %rax, RSI(%rdi)
        movq    %rax, RDI(%rdi)
        movq    %rax, ORIG_RAX(%rdi)
        movq    %rcx, RIP(%rdi)
        leaq    8(%rsp), %rcx
        movq    $__KERNEL_CS, CS(%rdi)
        movq    %rax, EFLAGS(%rdi)
        movq    %rcx, RSP(%rdi)
        movq    $__KERNEL_DS, SS(%rdi)
        jmpq    *%rdx
        CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif