/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers
 *   are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>

        .code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt $9,EFLAGS-\offset(%rsp)      /* interrupts off? */
        jnc 1f
        TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
        movq %gs:pda_oldrsp,\tmp
        movq \tmp,RSP(%rsp)
        movq $__USER_DS,SS(%rsp)
        movq $__USER_CS,CS(%rsp)
        movq $-1,RCX(%rsp)
        movq R11(%rsp),\tmp     /* get eflags */
        movq \tmp,EFLAGS(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp,offset=0
        movq RSP-\offset(%rsp),\tmp
        movq \tmp,%gs:pda_oldrsp
        movq EFLAGS-\offset(%rsp),\tmp
        movq \tmp,R11-\offset(%rsp)
.endm

.macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl %eax, %eax
        pushq %rax              /* ss */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET ss,0*/
        pushq %rax              /* rsp */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rsp,0
        pushq $(1<<9)           /* eflags - interrupts on */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET rflags,0*/
        pushq $__KERNEL_CS      /* cs */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET cs,0*/
        pushq \child_rip        /* rip */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip,0
        pushq %rax              /* orig rax */
        CFI_ADJUST_CFA_OFFSET 8
.endm

.macro UNFAKE_STACK_FRAME
        addq $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET -(6*8)
.endm

.macro CFI_DEFAULT_STACK start=1
        .if \start
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8
        .else
        CFI_DEF_CFA_OFFSET SS+8
        .endif
        CFI_REL_OFFSET r15,R15
        CFI_REL_OFFSET r14,R14
        CFI_REL_OFFSET r13,R13
        CFI_REL_OFFSET r12,R12
        CFI_REL_OFFSET rbp,RBP
        CFI_REL_OFFSET rbx,RBX
        CFI_REL_OFFSET r11,R11
        CFI_REL_OFFSET r10,R10
        CFI_REL_OFFSET r9,R9
        CFI_REL_OFFSET r8,R8
        CFI_REL_OFFSET rax,RAX
        CFI_REL_OFFSET rcx,RCX
        CFI_REL_OFFSET rdx,RDX
        CFI_REL_OFFSET rsi,RSI
        CFI_REL_OFFSET rdi,RDI
        CFI_REL_OFFSET rip,RIP
        /*CFI_REL_OFFSET cs,CS*/
        /*CFI_REL_OFFSET rflags,EFLAGS*/
        CFI_REL_OFFSET rsp,RSP
        /*CFI_REL_OFFSET ss,SS*/
.endm

/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
        CFI_DEFAULT_STACK
        push kernel_eflags(%rip)
        CFI_ADJUST_CFA_OFFSET 4
        popf                            # reset kernel eflags
        CFI_ADJUST_CFA_OFFSET -4
        call schedule_tail
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
        jnz rff_trace
rff_action:
        RESTORE_REST
        testl $3,CS-ARGOFFSET(%rsp)     # from kernel_thread?
        je int_ret_from_sys_call
        testl $_TIF_IA32,threadinfo_flags(%rcx)
        jnz int_ret_from_sys_call
        RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
        jmp ret_from_sys_call
rff_trace:
        movq %rsp,%rdi
        call syscall_trace_leave
        GET_THREAD_INFO(%rcx)
        jmp rff_action
        CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3    (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX  if we had a free scratch register we could save the RSP into the stack
 *      frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frame, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
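/*
 * Purely illustrative sketch (not part of the kernel entry path): a user-space
 * caller following the register convention above, e.g. write(1, buf, 14).
 * The msg label and the byte count are made up for the example:
 *
 *      movl $1,%eax            # __NR_write on x86-64
 *      movl $1,%edi            # arg0: fd = stdout
 *      leaq msg(%rip),%rsi     # arg1: buf
 *      movl $14,%edx           # arg2: count
 *      syscall                 # rcx and r11 are clobbered by the CPU
 */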
ENTRY(system_call)
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,PDA_STACKOFFSET
        CFI_REGISTER rip,rcx
        /*CFI_REGISTER rflags,r11*/
        swapgs
        movq %rsp,%gs:pda_oldrsp
        movq %gs:pda_kernelstack,%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        sti
        SAVE_ARGS 8,1
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
        CFI_REMEMBER_STATE
        jnz tracesys
        cmpq $__NR_syscall_max,%rax
        ja badsys
        movq %r10,%rcx
        call *sys_call_table(,%rax,8)   # XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
        .globl ret_from_sys_call
ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
sysret_check:
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz sysret_careful
        /*
         * sysretq will re-enable interrupts:
         */
        TRACE_IRQS_ON
        movq RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER rflags,r11*/
        movq %gs:pda_oldrsp,%rsp
        swapgs
        sysretq

        /* Handle reschedules */
        /* edx: work, edi: workmask */
sysret_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        jmp sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        sti
        testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz 1f

        /* Really a signal */
        /* edx: work flags (arg3) */
        leaq do_notify_resume(%rip),%rax
        leaq -ARGOFFSET(%rsp),%rdi      # &pt_regs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        call ptregscall_common
1:      movl $_TIF_NEED_RESCHED,%edi
        /* Use IRET because the user could have changed the frame. This
           works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
        cli
        TRACE_IRQS_OFF
        jmp int_with_check

badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call

        /* Do syscall tracing */
tracesys:
        CFI_RESTORE_STATE
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp)
        FIXUP_TOP_OF_STACK %rdi
        movq %rsp,%rdi
        call syscall_trace_enter
        LOAD_ARGS ARGOFFSET     /* reload args from stack in case ptrace changed them */
        RESTORE_REST
        cmpq $__NR_syscall_max,%rax
        ja 1f
        movq %r10,%rcx          /* fixup for C */
        call *sys_call_table(,%rax,8)
1:      movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because the user could have changed the frame */
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(system_call)

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
ENTRY(int_ret_from_sys_call)
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8-ARGOFFSET
        /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
        CFI_REL_OFFSET rsp,RSP-ARGOFFSET
        /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
        /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        CFI_REL_OFFSET rdx,RDX-ARGOFFSET
        CFI_REL_OFFSET rcx,RCX-ARGOFFSET
        CFI_REL_OFFSET rax,RAX-ARGOFFSET
        CFI_REL_OFFSET rdi,RDI-ARGOFFSET
        CFI_REL_OFFSET rsi,RSI-ARGOFFSET
        CFI_REL_OFFSET r8,R8-ARGOFFSET
        CFI_REL_OFFSET r9,R9-ARGOFFSET
        CFI_REL_OFFSET r10,R10-ARGOFFSET
        CFI_REL_OFFSET r11,R11-ARGOFFSET
        cli
        TRACE_IRQS_OFF
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
int_with_check:
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        jnz int_careful
        andl $~TS_COMPAT,threadinfo_status(%rcx)
        jmp retint_swapgs

        /* Either a reschedule, a signal or syscall exit tracking is needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
int_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc int_very_careful
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        cli
        TRACE_IRQS_OFF
        jmp int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        sti
        SAVE_REST
        /* Check for syscall exit trace */
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
        jz int_signal
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        leaq 8(%rsp),%rdi       # &ptregs -> arg1
        call syscall_trace_leave
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
        cli
        TRACE_IRQS_OFF
        jmp int_restore_rest
int_signal:
        testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
        jz 1f
        movq %rsp,%rdi          # &ptregs -> arg1
        xorl %esi,%esi          # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
END(int_ret_from_sys_call)

/*
 * Certain special system calls need to save a complete full stack frame.
 */
.macro PTREGSCALL label,func,arg
        .globl \label
\label:
        leaq \func(%rip),%rax
        leaq -ARGOFFSET+8(%rsp),\arg    /* 8 for return address */
        jmp ptregscall_common
END(\label)
.endm

        CFI_STARTPROC

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        movq %r11, %r15
        CFI_REGISTER rip, r15
        FIXUP_TOP_OF_STACK %r11
        call *%rax
        RESTORE_TOP_OF_STACK %r11
        movq %r15, %r11
        CFI_REGISTER rip, r11
        RESTORE_REST
        pushq %r11
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip, 0
        ret
        CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
        CFI_STARTPROC
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        CFI_ADJUST_CFA_OFFSET -8
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp)     # fixme, this could be done at a higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
.macro _frame ref
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8-\ref
        /*CFI_REL_OFFSET ss,SS-\ref*/
        CFI_REL_OFFSET rsp,RSP-\ref
        /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
        /*CFI_REL_OFFSET cs,CS-\ref*/
        CFI_REL_OFFSET rip,RIP-\ref
.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
.macro interrupt func
        cld
        SAVE_ARGS
        leaq -ARGOFFSET(%rsp),%rdi      # arg1 for handler
        pushq %rbp
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp, 0
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        testl $3,CS(%rdi)
        je 1f
        swapgs
        /* irqcount is used to check if a CPU is already on an interrupt
           stack or not. While this is essentially redundant with preempt_count,
           it is a little cheaper to use a separate counter in the PDA
           (short of moving irq_enter into assembly, which would be too
           much work) */
1:      incl %gs:pda_irqcount
        cmoveq %gs:pda_irqstackptr,%rsp
        push %rbp                       # backlink for old unwinder
        /*
         * We entered an interrupt context - irqs are off:
         */
        TRACE_IRQS_OFF
        call \func
.endm

ENTRY(common_interrupt)
        XCPT_FRAME
        interrupt do_IRQ
        /* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
        cli
        TRACE_IRQS_OFF
        decl %gs:pda_irqcount
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame.
         * %rcx: thread info. Interrupts are off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
retint_check:
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz retint_careful
retint_swapgs:
        /*
         * The iretq could re-enable interrupts:
         */
        cli
        TRACE_IRQS_IRETQ
        swapgs
        jmp restore_args

retint_restore_args:
        cli
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 0,8,0
iret_label:
        iretq

        .section __ex_table,"a"
        .quad iret_label,bad_iret
        .previous
        .section .fixup,"ax"
        /* force a signal here? this matches i386 behaviour */
        /* running with kernel gs */
bad_iret:
        movq $11,%rdi           /* SIGSEGV */
        TRACE_IRQS_ON
        sti
        jmp do_exit
        .previous

        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc retint_signal
        TRACE_IRQS_ON
        sti
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        GET_THREAD_INFO(%rcx)
        cli
        TRACE_IRQS_OFF
        jmp retint_check

retint_signal:
        testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
        jz retint_swapgs
        TRACE_IRQS_ON
        sti
        SAVE_REST
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi          # oldset
        movq %rsp,%rdi          # &pt_regs
        call do_notify_resume
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        movl $_TIF_NEED_RESCHED,%edi
        GET_THREAD_INFO(%rcx)
        jmp retint_check

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl $0,threadinfo_preempt_count(%rcx)
        jnz retint_restore_args
        bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
        jnc retint_restore_args
        bt $9,EFLAGS-ARGOFFSET(%rsp)    /* interrupts off? */
        jnc retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt num,func
        INTR_FRAME
        pushq $~(\num)
        CFI_ADJUST_CFA_OFFSET 8
        interrupt \func
        jmp ret_from_intr
        CFI_ENDPROC
.endm

ENTRY(thermal_interrupt)
        apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
        apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
        apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
        apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
.endm

        INVALIDATE_ENTRY 0
        INVALIDATE_ENTRY 1
        INVALIDATE_ENTRY 2
        INVALIDATE_ENTRY 3
        INVALIDATE_ENTRY 4
        INVALIDATE_ENTRY 5
        INVALIDATE_ENTRY 6
        INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
        apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
#endif

ENTRY(apic_timer_interrupt)
        apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
        apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
        apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
.macro zeroentry sym
        INTR_FRAME
        pushq $0                /* push error code/oldrax */
        CFI_ADJUST_CFA_OFFSET 8
        pushq %rax              /* push real oldrax to the rdi slot */
        CFI_ADJUST_CFA_OFFSET 8
        leaq \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
.endm

.macro errorentry sym
        XCPT_FRAME
        pushq %rax
        CFI_ADJUST_CFA_OFFSET 8
        leaq \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
.endm

        /* error code is on the stack already */
        /* handle NMI-like exceptions that can happen everywhere */
.macro paranoidentry sym, ist=0, irqtrace=1
        SAVE_ALL
        cld
        movl $1,%ebx
        movl $MSR_GS_BASE,%ecx
        rdmsr
        testl %edx,%edx
        js 1f
        swapgs
        xorl %ebx,%ebx
1:
        .if \ist
        movq %gs:pda_data_offset, %rbp
        .endif
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi
        movq $-1,ORIG_RAX(%rsp)
        .if \ist
        subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        call \sym
        .if \ist
        addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        cli
        .if \irqtrace
        TRACE_IRQS_OFF
        .endif
.endm

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * the NMI case should be common, and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
.macro paranoidexit trace=1
        /* ebx: no swapgs flag */
paranoid_exit\trace:
        testl %ebx,%ebx                 /* swapgs needed? */
        jnz paranoid_restore\trace
        testl $3,CS(%rsp)
        jnz paranoid_userspace\trace
paranoid_swapgs\trace:
        .if \trace
        TRACE_IRQS_IRETQ 0
        .endif
        swapgs
paranoid_restore\trace:
        RESTORE_ALL 8
        iretq
paranoid_userspace\trace:
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs\trace
        movq %rsp,%rdi                  /* &pt_regs */
        call sync_regs
        movq %rax,%rsp                  /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule\trace
        movl %ebx,%edx                  /* arg3: thread flags */
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
paranoid_schedule\trace:
        .if \trace
        TRACE_IRQS_ON
        .endif
        sti
        call schedule
        cli
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
        CFI_ENDPROC
.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
        _frame RDI
        /* rdi slot contains rax, oldrax contains error code */
        cld
        subq $14*8,%rsp
        CFI_ADJUST_CFA_OFFSET (14*8)
        movq %rsi,13*8(%rsp)
        CFI_REL_OFFSET rsi,RSI
        movq 14*8(%rsp),%rsi    /* load rax from rdi slot */
        movq %rdx,12*8(%rsp)
        CFI_REL_OFFSET rdx,RDX
        movq %rcx,11*8(%rsp)
        CFI_REL_OFFSET rcx,RCX
        movq %rsi,10*8(%rsp)    /* store rax */
        CFI_REL_OFFSET rax,RAX
        movq %r8, 9*8(%rsp)
        CFI_REL_OFFSET r8,R8
        movq %r9, 8*8(%rsp)
        CFI_REL_OFFSET r9,R9
        movq %r10,7*8(%rsp)
        CFI_REL_OFFSET r10,R10
        movq %r11,6*8(%rsp)
        CFI_REL_OFFSET r11,R11
        movq %rbx,5*8(%rsp)
        CFI_REL_OFFSET rbx,RBX
        movq %rbp,4*8(%rsp)
        CFI_REL_OFFSET rbp,RBP
        movq %r12,3*8(%rsp)
        CFI_REL_OFFSET r12,R12
        movq %r13,2*8(%rsp)
        CFI_REL_OFFSET r13,R13
        movq %r14,1*8(%rsp)
        CFI_REL_OFFSET r14,R14
        movq %r15,(%rsp)
        CFI_REL_OFFSET r15,R15
        xorl %ebx,%ebx
        testl $3,CS(%rsp)
        je error_kernelspace
error_swapgs:
        swapgs
error_sti:
        movq %rdi,RDI(%rsp)
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)
        call *%rax
        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
        movl %ebx,%eax
        RESTORE_REST
        cli
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne retint_kernel
        movl threadinfo_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi
        andl %edi,%edx
        jnz retint_careful
        /*
         * The iret might restore flags:
         */
        TRACE_IRQS_IRETQ
        swapgs
        RESTORE_ARGS 0,8,0
        jmp iret_label
        CFI_ENDPROC

error_kernelspace:
        incl %ebx
        /* There are two places in the kernel that can potentially fault with
           usergs. Handle them here. The exception handlers after
           iret run with kernel gs again, so don't set the user space flag.
           B stepping K8s sometimes report a truncated RIP for IRET
           exceptions returning to compat mode. Check for these here too. */
        leaq iret_label(%rip),%rbp
        cmpq %rbp,RIP(%rsp)
        je error_swapgs
        movl %ebp,%ebp          /* zero extend */
        cmpq %rbp,RIP(%rsp)
        je error_swapgs
        cmpq $gs_change,RIP(%rsp)
        je error_swapgs
        jmp error_sti
KPROBE_END(error_entry)

        /* Reload gs selector with exception handling */
        /* edi: new selector */
ENTRY(load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
        cli
        swapgs
gs_change:
        movl %edi,%gs
2:      mfence                  /* workaround */
        swapgs
        popf
        CFI_ADJUST_CFA_OFFSET -8
        ret
        CFI_ENDPROC
ENDPROC(load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        swapgs                  /* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp 2b
        .previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *      extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *      rdi: fn, rsi: arg, rdx: flags
 */
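/*
 * Purely illustrative sketch of a C caller of the interface above; the worker
 * function and the clone flags are made-up examples, not taken from this file:
 *
 *      static int my_worker(void *arg)
 *      {
 *              do_some_work(arg);
 *              return 0;
 *      }
 *
 *      pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 */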
ENTRY(kernel_thread)
        CFI_STARTPROC
        FAKE_STACK_FRAME $child_rip
        SAVE_ALL

        # rdi: flags, rsi: usp, rdx: will be &pt_regs
        movq %rdx,%rdi
        orq  kernel_thread_flags(%rip),%rdi
        movq $-1, %rsi
        movq %rsp, %rdx

        xorl %r8d,%r8d
        xorl %r9d,%r9d

        # clone now
        call do_fork
        movq %rax,RAX(%rsp)
        xorl %edi,%edi

        /*
         * It isn't worth checking for a reschedule here,
         * so internally to the x86_64 port you can rely on kernel_thread()
         * not rescheduling the child before returning; this avoids the need
         * for hacks, for example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]
         */
        RESTORE_ALL
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
        pushq $0                # fake return address
        CFI_STARTPROC
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */
        movq %rdi, %rax
        movq %rsi, %rdi
        call *%rax
        # exit
        xorl %edi, %edi
        call do_exit
        CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *      extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *      rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *      extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *      rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
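/*
 * Purely illustrative sketch of an in-kernel caller of this wrapper; the path
 * and the argument arrays are made-up examples, not taken from this file. On
 * success execve() does not return, so only the failure value is ever seen:
 *
 *      static char *argv[] = { "/sbin/init", NULL };
 *      static char *envp[] = { "HOME=/", "TERM=linux", NULL };
 *      long err = execve("/sbin/init", argv, envp);
 */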
ENTRY(execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
        SAVE_ALL
        call sys_execve
        movq %rax, RAX(%rsp)
        RESTORE_REST
        testq %rax,%rax
        je int_ret_from_sys_call
        RESTORE_ARGS
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(execve)

KPROBE_ENTRY(page_fault)
        errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
        zeroentry math_state_restore
END(device_not_available)

        /* runs on exception stack */
KPROBE_ENTRY(debug)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_debug, DEBUG_STACK
        paranoidexit
KPROBE_END(debug)

        /* runs on exception stack */
KPROBE_ENTRY(nmi)
        INTR_FRAME
        pushq $-1
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
        paranoidexit 0
#else
        jmp paranoid_exit1
        CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_int3, DEBUG_STACK
        jmp paranoid_exit1
        CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
        zeroentry do_overflow
END(overflow)

ENTRY(bounds)
        zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
        zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
        zeroentry do_reserved
END(reserved)

        /* runs on exception stack */
ENTRY(double_fault)
        XCPT_FRAME
        paranoidentry do_double_fault
        jmp paranoid_exit1
        CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
        errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
        errorentry do_segment_not_present
END(segment_not_present)

        /* runs on exception stack */
ENTRY(stack_segment)
        XCPT_FRAME
        paranoidentry do_stack_segment
        jmp paranoid_exit1
        CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
        errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
        errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
        zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
        /* runs on exception stack */
ENTRY(machine_check)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_machine_check
        jmp paranoid_exit1
        CFI_ENDPROC
END(machine_check)
#endif

        /* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        push %rbp
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp,0
        mov  %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        incl %gs:pda_irqcount
        cmove %gs:pda_irqstackptr,%rsp
        push %rbp                       # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
        decl %gs:pda_irqcount
        ret
        CFI_ENDPROC
ENDPROC(call_softirq)

#ifdef CONFIG_STACK_UNWIND
ENTRY(arch_unwind_init_running)
        CFI_STARTPROC
        movq %r15, R15(%rdi)
        movq %r14, R14(%rdi)
        xchgq %rsi, %rdx
        movq %r13, R13(%rdi)
        movq %r12, R12(%rdi)
        xorl %eax, %eax
        movq %rbp, RBP(%rdi)
        movq %rbx, RBX(%rdi)
        movq (%rsp), %rcx
        movq %rax, R11(%rdi)
        movq %rax, R10(%rdi)
        movq %rax, R9(%rdi)
        movq %rax, R8(%rdi)
        movq %rax, RAX(%rdi)
        movq %rax, RCX(%rdi)
        movq %rax, RDX(%rdi)
        movq %rax, RSI(%rdi)
        movq %rax, RDI(%rdi)
        movq %rax, ORIG_RAX(%rdi)
        movq %rcx, RIP(%rdi)
        leaq 8(%rsp), %rcx
        movq $__KERNEL_CS, CS(%rdi)
        movq %rax, EFLAGS(%rdi)
        movq %rcx, RSP(%rdi)
        movq $__KERNEL_DS, SS(%rdi)
        jmpq *%rdx
        CFI_ENDPROC
ENDPROC(arch_unwind_init_running)
#endif