/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>

.code64
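
/*
 * Without kernel preemption an interrupt returning to kernel mode never
 * needs a reschedule check, so retint_kernel can simply alias the plain
 * register-restore path.
 */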
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp: at FRAMEEND */
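/*
 * The SYSCALL fast path saves only a partial frame: the user RSP lives in
 * the PDA, the user EFLAGS in the R11 slot, and the SS/CS/RCX slots hold
 * no meaningful values. FIXUP_TOP_OF_STACK fills those slots in before C
 * code sees the pt_regs (RCX gets -1 since SYSCALL clobbered it);
 * RESTORE_TOP_OF_STACK copies the RSP and EFLAGS values back to where the
 * SYSRET path expects them (the PDA and the R11 slot).
 */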
.macro FIXUP_TOP_OF_STACK tmp
movq %gs:pda_oldrsp,\tmp
movq \tmp,RSP(%rsp)
movq $__USER_DS,SS(%rsp)
movq $__USER_CS,CS(%rsp)
movq $-1,RCX(%rsp)
movq R11(%rsp),\tmp /* get eflags */
movq \tmp,EFLAGS(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp,offset=0
movq RSP-\offset(%rsp),\tmp
movq \tmp,%gs:pda_oldrsp
movq EFLAGS-\offset(%rsp),\tmp
movq \tmp,R11-\offset(%rsp)
.endm

.macro FAKE_STACK_FRAME child_rip
/* push in order ss, rsp, eflags, cs, rip */
xorl %eax, %eax
pushq %rax /* ss */
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET ss,0*/
pushq %rax /* rsp */
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rsp,0
pushq $(1<<9) /* eflags - interrupts on */
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET rflags,0*/
pushq $__KERNEL_CS /* cs */
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET cs,0*/
pushq \child_rip /* rip */
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rip,0
pushq %rax /* orig rax */
CFI_ADJUST_CFA_OFFSET 8
.endm

.macro UNFAKE_STACK_FRAME
addq $8*6, %rsp
CFI_ADJUST_CFA_OFFSET -(6*8)
.endm

.macro CFI_DEFAULT_STACK start=1
.if \start
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,SS+8
.else
CFI_DEF_CFA_OFFSET SS+8
.endif
CFI_REL_OFFSET r15,R15
CFI_REL_OFFSET r14,R14
CFI_REL_OFFSET r13,R13
CFI_REL_OFFSET r12,R12
CFI_REL_OFFSET rbp,RBP
CFI_REL_OFFSET rbx,RBX
CFI_REL_OFFSET r11,R11
CFI_REL_OFFSET r10,R10
CFI_REL_OFFSET r9,R9
CFI_REL_OFFSET r8,R8
CFI_REL_OFFSET rax,RAX
CFI_REL_OFFSET rcx,RCX
CFI_REL_OFFSET rdx,RDX
CFI_REL_OFFSET rsi,RSI
CFI_REL_OFFSET rdi,RDI
CFI_REL_OFFSET rip,RIP
/*CFI_REL_OFFSET cs,CS*/
/*CFI_REL_OFFSET rflags,EFLAGS*/
CFI_REL_OFFSET rsp,RSP
/*CFI_REL_OFFSET ss,SS*/
.endm

/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
CFI_DEFAULT_STACK
push kernel_eflags(%rip)
CFI_ADJUST_CFA_OFFSET 8
popf # reset kernel eflags
CFI_ADJUST_CFA_OFFSET -8
call schedule_tail
GET_THREAD_INFO(%rcx)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
jnz rff_trace
rff_action:
RESTORE_REST
testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
je int_ret_from_sys_call
testl $_TIF_IA32,threadinfo_flags(%rcx)
jnz int_ret_from_sys_call
RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
jmp ret_from_sys_call
rff_trace:
movq %rsp,%rdi
call syscall_trace_leave
GET_THREAD_INFO(%rcx)
jmp rff_action
CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX if we had a free scratch register we could save the RSP into the
 * stack frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frame, always force IRET. That is because
 * IRET deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
swapgs
movq %rsp,%gs:pda_oldrsp
movq %gs:pda_kernelstack,%rsp
/*
 * No need to follow this irqs off/on section - it's straight
 * and short:
 */
sti
SAVE_ARGS 8,1
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
GET_THREAD_INFO(%rcx)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
CFI_REMEMBER_STATE
jnz tracesys
cmpq $__NR_syscall_max,%rax
ja badsys
movq %r10,%rcx
call *sys_call_table(,%rax,8) # XXX: rip relative
movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
.globl ret_from_sys_call
ret_from_sys_call:
movl $_TIF_ALLWORK_MASK,%edi
/* edi: flagmask */
sysret_check:
GET_THREAD_INFO(%rcx)
cli
TRACE_IRQS_OFF
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
CFI_REMEMBER_STATE
jnz sysret_careful
/*
 * sysretq will re-enable interrupts:
 */
TRACE_IRQS_ON
movq RIP-ARGOFFSET(%rsp),%rcx
CFI_REGISTER rip,rcx
RESTORE_ARGS 0,-ARG_SKIP,1
/*CFI_REGISTER rflags,r11*/
movq %gs:pda_oldrsp,%rsp
swapgs
sysretq

/* Handle reschedules */
/* edx: work, edi: workmask */
sysret_careful:
CFI_RESTORE_STATE
bt $TIF_NEED_RESCHED,%edx
jnc sysret_signal
TRACE_IRQS_ON
sti
pushq %rdi
CFI_ADJUST_CFA_OFFSET 8
call schedule
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
jmp sysret_check

/* Handle a signal */
sysret_signal:
TRACE_IRQS_ON
sti
testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
jz 1f
/* Really a signal */
/* edx: work flags (arg3) */
leaq do_notify_resume(%rip),%rax
leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
xorl %esi,%esi # oldset -> arg2
call ptregscall_common
1: movl $_TIF_NEED_RESCHED,%edi
/* Use IRET because the user could have changed the frame. This
   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
cli
TRACE_IRQS_OFF
jmp int_with_check
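
/* Syscall number out of range: report -ENOSYS and take the normal return path. */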
badsys:
movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
jmp ret_from_sys_call

/* Do syscall tracing */
tracesys:
CFI_RESTORE_STATE
SAVE_REST
movq $-ENOSYS,RAX(%rsp)
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed them */
RESTORE_REST
cmpq $__NR_syscall_max,%rax
movq $-ENOSYS,%rcx
cmova %rcx,%rax
ja 1f
movq %r10,%rcx /* fixup for C */
call *sys_call_table(,%rax,8)
1: movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because the user could have changed the frame */
jmp int_ret_from_sys_call
CFI_ENDPROC
END(system_call)

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,SS+8-ARGOFFSET
/*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
CFI_REL_OFFSET rsp,RSP-ARGOFFSET
/*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
/*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
CFI_REL_OFFSET rip,RIP-ARGOFFSET
CFI_REL_OFFSET rdx,RDX-ARGOFFSET
CFI_REL_OFFSET rcx,RCX-ARGOFFSET
CFI_REL_OFFSET rax,RAX-ARGOFFSET
CFI_REL_OFFSET rdi,RDI-ARGOFFSET
CFI_REL_OFFSET rsi,RSI-ARGOFFSET
CFI_REL_OFFSET r8,R8-ARGOFFSET
CFI_REL_OFFSET r9,R9-ARGOFFSET
CFI_REL_OFFSET r10,R10-ARGOFFSET
CFI_REL_OFFSET r11,R11-ARGOFFSET
cli
TRACE_IRQS_OFF
testl $3,CS-ARGOFFSET(%rsp)
je retint_restore_args
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
int_with_check:
GET_THREAD_INFO(%rcx)
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
jnz int_careful
andl $~TS_COMPAT,threadinfo_status(%rcx)
jmp retint_swapgs

/* Either reschedule or signal or syscall exit tracking needed. */
/* First do a reschedule test. */
/* edx: work, edi: workmask */
int_careful:
bt $TIF_NEED_RESCHED,%edx
jnc int_very_careful
TRACE_IRQS_ON
sti
pushq %rdi
CFI_ADJUST_CFA_OFFSET 8
call schedule
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
cli
TRACE_IRQS_OFF
jmp int_with_check

/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
TRACE_IRQS_ON
sti
SAVE_REST
/* Check for syscall exit trace */
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
jz int_signal
pushq %rdi
CFI_ADJUST_CFA_OFFSET 8
leaq 8(%rsp),%rdi # &ptregs -> arg1
call syscall_trace_leave
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
cli
TRACE_IRQS_OFF
jmp int_restore_rest
int_signal:
testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
jz 1f
movq %rsp,%rdi # &ptregs -> arg1
xorl %esi,%esi # oldset -> arg2
call do_notify_resume
1: movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
RESTORE_REST
cli
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
END(int_ret_from_sys_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */
.macro PTREGSCALL label,func,arg
.globl \label
\label:
leaq \func(%rip),%rax
leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
jmp ptregscall_common
END(\label)
.endm

CFI_STARTPROC
PTREGSCALL stub_clone, sys_clone, %r8
PTREGSCALL stub_fork, sys_fork, %rdi
PTREGSCALL stub_vfork, sys_vfork, %rdi
PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
PTREGSCALL stub_iopl, sys_iopl, %rsi
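
/*
 * Common tail for the PTREGSCALL stubs above: each stub left the handler
 * address in %rax and a pointer to the pt_regs area in its argument
 * register. Save the remaining registers, fix up the top of stack, call
 * the handler, then undo the fixup and return.
 */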
ENTRY(ptregscall_common)
popq %r11
CFI_ADJUST_CFA_OFFSET -8
CFI_REGISTER rip, r11
SAVE_REST
movq %r11, %r15
CFI_REGISTER rip, r15
FIXUP_TOP_OF_STACK %r11
call *%rax
RESTORE_TOP_OF_STACK %r11
movq %r15, %r11
CFI_REGISTER rip, r11
RESTORE_REST
pushq %r11
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rip, 0
ret
CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
CFI_STARTPROC
popq %r11
CFI_ADJUST_CFA_OFFSET -8
CFI_REGISTER rip, r11
SAVE_REST
FIXUP_TOP_OF_STACK %r11
call sys_execve
RESTORE_TOP_OF_STACK %r11
movq %rax,RAX(%rsp)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
CFI_STARTPROC
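/* discard the return address pushed by the syscall-table call;
   sigreturn leaves through int_ret_from_sys_call instead of returning */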
addq $8, %rsp
CFI_ADJUST_CFA_OFFSET -8
SAVE_REST
movq %rsp,%rdi
FIXUP_TOP_OF_STACK %r11
call sys_rt_sigreturn
movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
.macro _frame ref
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,SS+8-\ref
/*CFI_REL_OFFSET ss,SS-\ref*/
CFI_REL_OFFSET rsp,RSP-\ref
/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
/*CFI_REL_OFFSET cs,CS-\ref*/
CFI_REL_OFFSET rip,RIP-\ref
.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
.macro interrupt func
cld
SAVE_ARGS
leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
pushq %rbp
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rbp, 0
movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
testl $3,CS(%rdi)
je 1f
swapgs
/* irqcount is used to check if a CPU is already on an interrupt
   stack or not. While this is essentially redundant with preempt_count,
   it is a little cheaper to use a separate counter in the PDA
   (short of moving irq_enter into assembly, which would be too
   much work) */
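/* cmoveq below only switches stacks when the incl result is zero, i.e.
   pda_irqcount was -1 and no IRQ stack was in use yet; nested interrupts
   stay on the stack they arrived on. */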
1: incl %gs:pda_irqcount
cmoveq %gs:pda_irqstackptr,%rsp
push %rbp # backlink for old unwinder
/*
 * We entered an interrupt context - irqs are off:
 */
TRACE_IRQS_OFF
call \func
.endm

ENTRY(common_interrupt)
XCPT_FRAME
interrupt do_IRQ
/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
cli
TRACE_IRQS_OFF
decl %gs:pda_irqcount
leaveq
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
exit_intr:
GET_THREAD_INFO(%rcx)
testl $3,CS-ARGOFFSET(%rsp)
je retint_kernel

/* Interrupt came from user space */
/*
 * Has a correct top of stack, but a partial stack frame
 * %rcx: thread info. Interrupts off.
 */
retint_with_reschedule:
movl $_TIF_WORK_MASK,%edi
retint_check:
movl threadinfo_flags(%rcx),%edx
andl %edi,%edx
CFI_REMEMBER_STATE
jnz retint_careful
retint_swapgs:
/*
 * The iretq could re-enable interrupts:
 */
cli
TRACE_IRQS_IRETQ
swapgs
jmp restore_args

retint_restore_args:
cli
/*
 * The iretq could re-enable interrupts:
 */
TRACE_IRQS_IRETQ
restore_args:
RESTORE_ARGS 0,8,0
iret_label:
iretq

.section __ex_table,"a"
.quad iret_label,bad_iret
.previous
.section .fixup,"ax"
/* force a signal here? this matches i386 behaviour */
/* running with kernel gs */
bad_iret:
movq $11,%rdi /* SIGSEGV */
TRACE_IRQS_ON
sti
jmp do_exit
.previous

/* edi: workmask, edx: work */
retint_careful:
CFI_RESTORE_STATE
bt $TIF_NEED_RESCHED,%edx
jnc retint_signal
TRACE_IRQS_ON
sti
pushq %rdi
CFI_ADJUST_CFA_OFFSET 8
call schedule
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
GET_THREAD_INFO(%rcx)
cli
TRACE_IRQS_OFF
jmp retint_check

retint_signal:
testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
jz retint_swapgs
TRACE_IRQS_ON
sti
SAVE_REST
movq $-1,ORIG_RAX(%rsp)
xorl %esi,%esi # oldset
movq %rsp,%rdi # &pt_regs
call do_notify_resume
RESTORE_REST
cli
TRACE_IRQS_OFF
movl $_TIF_NEED_RESCHED,%edi
GET_THREAD_INFO(%rcx)
jmp retint_check

#ifdef CONFIG_PREEMPT
/* Returning to kernel space. Check if we need preemption */
/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
cmpl $0,threadinfo_preempt_count(%rcx)
jnz retint_restore_args
bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
jnc retint_restore_args
bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
jnc retint_restore_args
call preempt_schedule_irq
jmp exit_intr
#endif

CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt num,func
INTR_FRAME
pushq $~(\num)
CFI_ADJUST_CFA_OFFSET 8
interrupt \func
jmp ret_from_intr
CFI_ENDPROC
.endm

ENTRY(thermal_interrupt)
apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
.endm

INVALIDATE_ENTRY 0
INVALIDATE_ENTRY 1
INVALIDATE_ENTRY 2
INVALIDATE_ENTRY 3
INVALIDATE_ENTRY 4
INVALIDATE_ENTRY 5
INVALIDATE_ENTRY 6
INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
#endif

ENTRY(apic_timer_interrupt)
apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
.macro zeroentry sym
INTR_FRAME
pushq $0 /* push error code/oldrax */
CFI_ADJUST_CFA_OFFSET 8
pushq %rax /* push real oldrax to the rdi slot */
CFI_ADJUST_CFA_OFFSET 8
leaq \sym(%rip),%rax
jmp error_entry
CFI_ENDPROC
.endm

.macro errorentry sym
XCPT_FRAME
pushq %rax
CFI_ADJUST_CFA_OFFSET 8
leaq \sym(%rip),%rax
jmp error_entry
CFI_ENDPROC
.endm

/* error code is on the stack already */
/* handle NMI-like exceptions that can happen everywhere */
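/*
 * %ebx records whether a swapgs is needed on the way out: the MSR_GS_BASE
 * read below is negative (a kernel address) when the kernel GS base is
 * already active, in which case no swapgs is done and %ebx stays 1;
 * otherwise swapgs is executed and %ebx is cleared so the paranoid exit
 * path knows to swap back.
 */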
.macro paranoidentry sym, ist=0, irqtrace=1
SAVE_ALL
cld
movl $1,%ebx
movl $MSR_GS_BASE,%ecx
rdmsr
testl %edx,%edx
js 1f
swapgs
xorl %ebx,%ebx
1:
.if \ist
movq %gs:pda_data_offset, %rbp
.endif
movq %rsp,%rdi
movq ORIG_RAX(%rsp),%rsi
movq $-1,ORIG_RAX(%rsp)
.if \ist
subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
.endif
call \sym
.if \ist
addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
.endif
cli
.if \irqtrace
TRACE_IRQS_OFF
.endif
.endm

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
.macro paranoidexit trace=1
/* ebx: no swapgs flag */
paranoid_exit\trace:
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore\trace
testl $3,CS(%rsp)
jnz paranoid_userspace\trace
paranoid_swapgs\trace:
.if \trace
TRACE_IRQS_IRETQ 0
.endif
swapgs
paranoid_restore\trace:
RESTORE_ALL 8
iretq
paranoid_userspace\trace:
GET_THREAD_INFO(%rcx)
movl threadinfo_flags(%rcx),%ebx
andl $_TIF_WORK_MASK,%ebx
jz paranoid_swapgs\trace
movq %rsp,%rdi /* &pt_regs */
call sync_regs
movq %rax,%rsp /* switch stack for scheduling */
testl $_TIF_NEED_RESCHED,%ebx
jnz paranoid_schedule\trace
movl %ebx,%edx /* arg3: thread flags */
.if \trace
TRACE_IRQS_ON
.endif
sti
xorl %esi,%esi /* arg2: oldset */
movq %rsp,%rdi /* arg1: &pt_regs */
call do_notify_resume
cli
.if \trace
TRACE_IRQS_OFF
.endif
jmp paranoid_userspace\trace
paranoid_schedule\trace:
.if \trace
TRACE_IRQS_ON
.endif
sti
call schedule
cli
.if \trace
TRACE_IRQS_OFF
.endif
jmp paranoid_userspace\trace
CFI_ENDPROC
.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
_frame RDI
/* rdi slot contains rax, oldrax contains error code */
cld
subq $14*8,%rsp
CFI_ADJUST_CFA_OFFSET (14*8)
movq %rsi,13*8(%rsp)
CFI_REL_OFFSET rsi,RSI
movq 14*8(%rsp),%rsi /* load rax from rdi slot */
movq %rdx,12*8(%rsp)
CFI_REL_OFFSET rdx,RDX
movq %rcx,11*8(%rsp)
CFI_REL_OFFSET rcx,RCX
movq %rsi,10*8(%rsp) /* store rax */
CFI_REL_OFFSET rax,RAX
movq %r8, 9*8(%rsp)
CFI_REL_OFFSET r8,R8
movq %r9, 8*8(%rsp)
CFI_REL_OFFSET r9,R9
movq %r10,7*8(%rsp)
CFI_REL_OFFSET r10,R10
movq %r11,6*8(%rsp)
CFI_REL_OFFSET r11,R11
movq %rbx,5*8(%rsp)
CFI_REL_OFFSET rbx,RBX
movq %rbp,4*8(%rsp)
CFI_REL_OFFSET rbp,RBP
movq %r12,3*8(%rsp)
CFI_REL_OFFSET r12,R12
movq %r13,2*8(%rsp)
CFI_REL_OFFSET r13,R13
movq %r14,1*8(%rsp)
CFI_REL_OFFSET r14,R14
movq %r15,(%rsp)
CFI_REL_OFFSET r15,R15
xorl %ebx,%ebx
testl $3,CS(%rsp)
je error_kernelspace
error_swapgs:
swapgs
error_sti:
movq %rdi,RDI(%rsp)
movq %rsp,%rdi
movq ORIG_RAX(%rsp),%rsi /* get error code */
movq $-1,ORIG_RAX(%rsp)
call *%rax
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
movl %ebx,%eax
RESTORE_REST
cli
TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
testl %eax,%eax
jne retint_kernel
movl threadinfo_flags(%rcx),%edx
movl $_TIF_WORK_MASK,%edi
andl %edi,%edx
jnz retint_careful
/*
 * The iret might restore flags:
 */
TRACE_IRQS_IRETQ
swapgs
RESTORE_ARGS 0,8,0
jmp iret_label
CFI_ENDPROC

error_kernelspace:
incl %ebx
/* There are two places in the kernel that can potentially fault with
   usergs. Handle them here. The exception handlers after
   iret run with kernel gs again, so don't set the user space flag.
   B stepping K8s sometimes report a truncated RIP for IRET
   exceptions returning to compat mode. Check for these here too. */
leaq iret_label(%rip),%rbp
cmpq %rbp,RIP(%rsp)
je error_swapgs
movl %ebp,%ebp /* zero extend */
cmpq %rbp,RIP(%rsp)
je error_swapgs
cmpq $gs_change,RIP(%rsp)
je error_swapgs
jmp error_sti
KPROBE_END(error_entry)

/* Reload gs selector with exception handling */
/* edi: new selector */
ENTRY(load_gs_index)
CFI_STARTPROC
pushf
CFI_ADJUST_CFA_OFFSET 8
cli
swapgs
gs_change:
movl %edi,%gs
2: mfence /* workaround */
swapgs
popf
CFI_ADJUST_CFA_OFFSET -8
ret
CFI_ENDPROC
ENDPROC(load_gs_index)

.section __ex_table,"a"
.align 8
.quad gs_change,bad_gs
.previous
.section .fixup,"ax"
/* running with kernelgs */
bad_gs:
swapgs /* switch back to user gs */
xorl %eax,%eax
movl %eax,%gs
jmp 2b
.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 * rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
CFI_STARTPROC
FAKE_STACK_FRAME $child_rip
SAVE_ALL
# rdi: flags, rsi: usp, rdx: will be &pt_regs
movq %rdx,%rdi
orq kernel_thread_flags(%rip),%rdi
movq $-1, %rsi
movq %rsp, %rdx
xorl %r8d,%r8d
xorl %r9d,%r9d
# clone now
call do_fork
movq %rax,RAX(%rsp)
xorl %edi,%edi
/*
 * It isn't worth checking for a reschedule here,
 * so internally to the x86_64 port you can rely on kernel_thread()
 * not to reschedule the child before returning; this avoids the need
 * for hacks, for example to fork off the per-CPU idle tasks.
 * [Hopefully no generic code relies on the reschedule -AK]
 */
RESTORE_ALL
UNFAKE_STACK_FRAME
ret
CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
pushq $0 # fake return address
CFI_STARTPROC
/*
 * Here we are in the child and the registers are set as they were
 * at kernel_thread() invocation in the parent.
 */
movq %rdi, %rax
movq %rsi, %rdi
call *%rax
# exit
xorl %edi, %edi
call do_exit
CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 * extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 * rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 * extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(kernel_execve)
CFI_STARTPROC
FAKE_STACK_FRAME $0
SAVE_ALL
call sys_execve
movq %rax, RAX(%rsp)
RESTORE_REST
testq %rax,%rax
je int_ret_from_sys_call
RESTORE_ARGS
UNFAKE_STACK_FRAME
ret
CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
zeroentry math_state_restore
END(device_not_available)

/* runs on exception stack */
KPROBE_ENTRY(debug)
INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_debug, DEBUG_STACK
paranoidexit
KPROBE_END(debug)

/* runs on exception stack */
KPROBE_ENTRY(nmi)
INTR_FRAME
pushq $-1
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
paranoidexit 0
#else
jmp paranoid_exit1
CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_int3, DEBUG_STACK
jmp paranoid_exit1
CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
zeroentry do_overflow
END(overflow)

ENTRY(bounds)
zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
zeroentry do_reserved
END(reserved)

/* runs on exception stack */
ENTRY(double_fault)
XCPT_FRAME
paranoidentry do_double_fault
jmp paranoid_exit1
CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
errorentry do_segment_not_present
END(segment_not_present)

/* runs on exception stack */
ENTRY(stack_segment)
XCPT_FRAME
paranoidentry do_stack_segment
jmp paranoid_exit1
CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
/* runs on exception stack */
ENTRY(machine_check)
INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_machine_check
jmp paranoid_exit1
CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
CFI_STARTPROC
push %rbp
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rbp,0
mov %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
incl %gs:pda_irqcount
cmove %gs:pda_irqstackptr,%rsp
push %rbp # backlink for old unwinder
call __do_softirq
leaveq
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl %gs:pda_irqcount
ret
CFI_ENDPROC
ENDPROC(call_softirq)

#ifdef CONFIG_STACK_UNWIND
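/*
 * Fill the frame pointed to by %rdi with the caller's register state:
 * callee-saved registers, the return RIP and the caller's RSP are stored,
 * caller-saved slots are zeroed. Then tail-call the callback that arrived
 * in %rsi, passing %rdi = frame and %rsi = the argument that arrived in %rdx.
 */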
ENTRY(arch_unwind_init_running)
CFI_STARTPROC
movq %r15, R15(%rdi)
movq %r14, R14(%rdi)
xchgq %rsi, %rdx
movq %r13, R13(%rdi)
movq %r12, R12(%rdi)
xorl %eax, %eax
movq %rbp, RBP(%rdi)
movq %rbx, RBX(%rdi)
movq (%rsp), %rcx
movq %rax, R11(%rdi)
movq %rax, R10(%rdi)
movq %rax, R9(%rdi)
movq %rax, R8(%rdi)
movq %rax, RAX(%rdi)
movq %rax, RCX(%rdi)
movq %rax, RDX(%rdi)
movq %rax, RSI(%rdi)
movq %rax, RDI(%rdi)
movq %rax, ORIG_RAX(%rdi)
movq %rcx, RIP(%rdi)
leaq 8(%rsp), %rcx
movq $__KERNEL_CS, CS(%rdi)
movq %rax, EFLAGS(%rdi)
movq %rcx, RSP(%rdi)
movq $__KERNEL_DS, SS(%rdi)
jmpq *%rdx
CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif