/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers
 *   are not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>

	.code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_irq_enable_syscall_ret)
	movq	%gs:pda_oldrsp,%rsp
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

	/* %rsp:at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
	movq	%gs:pda_oldrsp,\tmp
	movq	\tmp,RSP(%rsp)
	movq	$__USER_DS,SS(%rsp)
	movq	$__USER_CS,CS(%rsp)
	movq	$-1,RCX(%rsp)
	movq	R11(%rsp),\tmp	/* get eflags */
	movq	\tmp,EFLAGS(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq	RSP-\offset(%rsp),\tmp
	movq	\tmp,%gs:pda_oldrsp
	movq	EFLAGS-\offset(%rsp),\tmp
	movq	\tmp,R11-\offset(%rsp)
.endm

.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl	%eax, %eax
	pushq	%rax		/* ss */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	ss,0*/
	pushq	%rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rsp,0
	pushq	$(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	rflags,0*/
	pushq	$__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET	8
	/*CFI_REL_OFFSET	cs,0*/
	pushq	\child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip,0
	pushq	%rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET	8
.endm

.macro UNFAKE_STACK_FRAME
	addq	$8*6, %rsp
	CFI_ADJUST_CFA_OFFSET	-(6*8)
.endm

.macro CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET	r15,R15
	CFI_REL_OFFSET	r14,R14
	CFI_REL_OFFSET	r13,R13
	CFI_REL_OFFSET	r12,R12
	CFI_REL_OFFSET	rbp,RBP
	CFI_REL_OFFSET	rbx,RBX
	CFI_REL_OFFSET	r11,R11
	CFI_REL_OFFSET	r10,R10
	CFI_REL_OFFSET	r9,R9
	CFI_REL_OFFSET	r8,R8
	CFI_REL_OFFSET	rax,RAX
	CFI_REL_OFFSET	rcx,RCX
	CFI_REL_OFFSET	rdx,RDX
	CFI_REL_OFFSET	rsi,RSI
	CFI_REL_OFFSET	rdi,RDI
	CFI_REL_OFFSET	rip,RIP
	/*CFI_REL_OFFSET	cs,CS*/
	/*CFI_REL_OFFSET	rflags,EFLAGS*/
	CFI_REL_OFFSET	rsp,RSP
	/*CFI_REL_OFFSET	ss,SS*/
.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push	kernel_eflags(%rip)	# pushes 8 bytes
	CFI_ADJUST_CFA_OFFSET	8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET	-8
	call	schedule_tail
	GET_THREAD_INFO(%rcx)
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz	rff_trace
rff_action:
	RESTORE_REST
	testl	$3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je	int_ret_from_sys_call
	testl	$_TIF_IA32,threadinfo_flags(%rcx)
	jnz	int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp	ret_from_sys_call
rff_trace:
	movq	%rsp,%rdi
	call	syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp	rff_action
	CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack
 *	frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
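
/*
 * For illustration only (not assembled here): a minimal user-space sketch
 * of the convention described above, assuming a 14-byte string "msg":
 *
 *	movq	$1,%rax			# system call number (__NR_write)
 *	movq	$1,%rdi			# arg0: fd
 *	leaq	msg(%rip),%rsi		# arg1: buf
 *	movq	$14,%rdx		# arg2: count
 *	syscall				# rcx <- return RIP, r11 <- rflags
 *
 * The entry code below relies on exactly this: SYSCALL clobbers rcx and r11,
 * so they cannot carry arguments and r10 stands in for the fourth C argument.
 */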
ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,PDA_STACKOFFSET
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq	%rsp,%gs:pda_oldrsp
	movq	%gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq	%rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq	%rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz	tracesys
	cmpq	$__NR_syscall_max,%rax
	ja	badsys
	movq	%r10,%rcx
	call	*sys_call_table(,%rax,8)	# XXX:	rip relative
	movq	%rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl	$_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl	threadinfo_flags(%rcx),%edx
	andl	%edi,%edx
	jnz	sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq	RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER	rflags,r11*/
	ENABLE_INTERRUPTS_SYSCALL_RET

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt	$TIF_NEED_RESCHED,%edx
	jnc	sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET	8
	call	schedule
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET	-8
	jmp	sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq	do_notify_resume(%rip),%rax
	leaq	-ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl	%esi,%esi		# oldset -> arg2
	call	ptregscall_common
1:	movl	$_TIF_NEED_RESCHED,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check

badsys:
	movq	$-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp	ret_from_sys_call
	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq	$-ENOSYS,RAX(%rsp)
	FIXUP_TOP_OF_STACK %rdi
	movq	%rsp,%rdi
	call	syscall_trace_enter
	LOAD_ARGS ARGOFFSET	/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq	$__NR_syscall_max,%rax
	movq	$-ENOSYS,%rcx
	cmova	%rcx,%rax
	ja	1f
	movq	%r10,%rcx	/* fixup for C */
	call	*sys_call_table(,%rax,8)
1:	movq	%rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	$3,CS-ARGOFFSET(%rsp)
	je	retint_restore_args
	movl	$_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%edx
	andl	%edi,%edx
	jnz	int_careful
	andl	$~TS_COMPAT,threadinfo_status(%rcx)
	jmp	retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt	$TIF_NEED_RESCHED,%edx
	jnc	int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET	8
	call	schedule
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET	-8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl	$(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz	int_signal
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET	8
	leaq	8(%rsp),%rdi		# &ptregs -> arg1
	call	syscall_trace_leave
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET	-8
	andl	$~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp	int_restore_rest

int_signal:
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	1f
	movq	%rsp,%rdi		# &ptregs -> arg1
	xorl	%esi,%esi		# oldset -> arg2
	call	do_notify_resume
1:	movl	$_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check
	CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls that need to save a complete full stack frame.
 */
	.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq	\func(%rip),%rax
	leaq	-ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp	ptregscall_common
END(\label)
	.endm
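
/*
 * Purely for illustration: instantiating the macro above as
 * "PTREGSCALL stub_clone, sys_clone, %r8" expands roughly to
 *
 *	.globl stub_clone
 *	stub_clone:
 *		leaq	sys_clone(%rip),%rax
 *		leaq	-ARGOFFSET+8(%rsp),%r8	// 8 for return address
 *		jmp	ptregscall_common
 *	END(stub_clone)
 *
 * i.e. \arg picks the register that carries the &pt_regs argument of the
 * C handler, and ptregscall_common builds the full frame before calling it.
 */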
	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq	%r11
	CFI_ADJUST_CFA_OFFSET	-8
	CFI_REGISTER	rip, r11
	SAVE_REST
	movq	%r11, %r15
	CFI_REGISTER	rip, r15
	FIXUP_TOP_OF_STACK %r11
	call	*%rax
	RESTORE_TOP_OF_STACK %r11
	movq	%r15, %r11
	CFI_REGISTER	rip, r11
	RESTORE_REST
	pushq	%r11
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq	%r11
	CFI_ADJUST_CFA_OFFSET	-8
	CFI_REGISTER	rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	call	sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq	%rax,RAX(%rsp)
	RESTORE_REST
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq	$8, %rsp
	CFI_ADJUST_CFA_OFFSET	-8
	SAVE_REST
	movq	%rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call	sys_rt_sigreturn
	movq	%rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
	.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
	.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
	.macro interrupt func
	cld
	SAVE_ARGS
	leaq	-ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq	%rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET		rbp, 0
	movq	%rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	testl	$3,CS(%rdi)
	je	1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl	%gs:pda_irqcount
	cmoveq	%gs:pda_irqstackptr,%rsp
	push	%rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call	\func
	.endm
ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	%gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl	$3,CS-ARGOFFSET(%rsp)
	je	retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl	$_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl	threadinfo_flags(%rcx),%edx
	andl	%edi,%edx
	CFI_REMEMBER_STATE
	jnz	retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp	restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0
iret_label:
#ifdef CONFIG_PARAVIRT
	INTERRUPT_RETURN
#endif
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
	.section .fixup,"ax"
	/* force a signal here? this matches i386 behaviour */
	/* running with kernel gs */
bad_iret:
	movq	$11,%rdi	/* SIGSEGV */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	jmp	do_exit
	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt	$TIF_NEED_RESCHED,%edx
	jnc	retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq	%rdi
	CFI_ADJUST_CFA_OFFSET	8
	call	schedule
	popq	%rdi
	CFI_ADJUST_CFA_OFFSET	-8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	retint_check

retint_signal:
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq	$-1,ORIG_RAX(%rsp)
	xorl	%esi,%esi		# oldset
	movq	%rsp,%rdi		# &pt_regs
	call	do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl	$_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp	retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl	$0,threadinfo_preempt_count(%rcx)
	jnz	retint_restore_args
	bt	$TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc	retint_restore_args
	bt	$9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc	retint_restore_args
	call	preempt_schedule_irq
	jmp	exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
	.macro apicinterrupt num,func
	INTR_FRAME
	pushq	$~(\num)
	CFI_ADJUST_CFA_OFFSET	8
	interrupt \func
	jmp	ret_from_intr
	CFI_ENDPROC
	.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
	.macro zeroentry sym
	INTR_FRAME
	pushq	$0		/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET	8
	pushq	%rax		/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rax,0
	leaq	\sym(%rip),%rax
	jmp	error_entry
	CFI_ENDPROC
	.endm

	.macro errorentry sym
	XCPT_FRAME
	pushq	%rax
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rax,0
	leaq	\sym(%rip),%rax
	jmp	error_entry
	CFI_ENDPROC
	.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
	.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl	$1,%ebx
	movl	$MSR_GS_BASE,%ecx
	rdmsr
	testl	%edx,%edx
	js	1f
	SWAPGS
	xorl	%ebx,%ebx
1:
	.if \ist
	movq	%gs:pda_data_offset, %rbp
	.endif
	movq	%rsp,%rdi
	movq	ORIG_RAX(%rsp),%rsi
	movq	$-1,ORIG_RAX(%rsp)
	.if \ist
	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call	\sym
	.if \ist
	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
	.endm
/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
	.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl	%ebx,%ebx			/* swapgs needed? */
	jnz	paranoid_restore\trace
	testl	$3,CS(%rsp)
	jnz	paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	INTERRUPT_RETURN
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl	threadinfo_flags(%rcx),%ebx
	andl	$_TIF_WORK_MASK,%ebx
	jz	paranoid_swapgs\trace
	movq	%rsp,%rdi			/* &pt_regs */
	call	sync_regs
	movq	%rax,%rsp			/* switch stack for scheduling */
	testl	$_TIF_NEED_RESCHED,%ebx
	jnz	paranoid_schedule\trace
	movl	%ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl	%esi,%esi			/* arg2: oldset */
	movq	%rsp,%rdi			/* arg1: &pt_regs */
	call	do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp	paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call	schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp	paranoid_userspace\trace
	CFI_ENDPROC
	.endm
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET	rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq	$14*8,%rsp
	CFI_ADJUST_CFA_OFFSET	(14*8)
	movq	%rsi,13*8(%rsp)
	CFI_REL_OFFSET	rsi,RSI
	movq	14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER	rax,rsi
	movq	%rdx,12*8(%rsp)
	CFI_REL_OFFSET	rdx,RDX
	movq	%rcx,11*8(%rsp)
	CFI_REL_OFFSET	rcx,RCX
	movq	%rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET	rax,RAX
	movq	%r8, 9*8(%rsp)
	CFI_REL_OFFSET	r8,R8
	movq	%r9, 8*8(%rsp)
	CFI_REL_OFFSET	r9,R9
	movq	%r10,7*8(%rsp)
	CFI_REL_OFFSET	r10,R10
	movq	%r11,6*8(%rsp)
	CFI_REL_OFFSET	r11,R11
	movq	%rbx,5*8(%rsp)
	CFI_REL_OFFSET	rbx,RBX
	movq	%rbp,4*8(%rsp)
	CFI_REL_OFFSET	rbp,RBP
	movq	%r12,3*8(%rsp)
	CFI_REL_OFFSET	r12,R12
	movq	%r13,2*8(%rsp)
	CFI_REL_OFFSET	r13,R13
	movq	%r14,1*8(%rsp)
	CFI_REL_OFFSET	r14,R14
	movq	%r15,(%rsp)
	CFI_REL_OFFSET	r15,R15
	xorl	%ebx,%ebx
	testl	$3,CS(%rsp)
	je	error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	movq	%rdi,RDI(%rsp)
	CFI_REL_OFFSET	rdi,RDI
	movq	%rsp,%rdi
	movq	ORIG_RAX(%rsp),%rsi	/* get error code */
	movq	$-1,ORIG_RAX(%rsp)
	call	*%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl	%ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl	%eax,%eax
	jne	retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl	threadinfo_flags(%rcx),%edx
	movl	$_TIF_WORK_MASK,%edi
	andl	%edi,%edx
	jnz	retint_careful
	jmp	retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl	%ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq	iret_label(%rip),%rbp
	cmpq	%rbp,RIP(%rsp)
	je	error_swapgs
	movl	%ebp,%ebp	/* zero extend */
	cmpq	%rbp,RIP(%rsp)
	je	error_swapgs
	cmpq	$gs_change,RIP(%rsp)
	je	error_swapgs
	jmp	error_sti
KPROBE_END(error_entry)
	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET	8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl	%edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET	-8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl	%eax,%eax
	movl	%eax,%gs
	jmp	2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
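/*
 * Illustrative only (a hypothetical in-kernel call site, not assembled here),
 * following the asm argument layout above; "my_thread_fn" is an assumed name:
 *
 *	leaq	my_thread_fn(%rip),%rdi		# fn
 *	movq	$0,%rsi				# arg
 *	movq	$0,%rdx				# flags (CLONE_* bits as needed)
 *	call	kernel_thread
 */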
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq	%rdx,%rdi
	orq	kernel_thread_flags(%rip),%rdi
	movq	$-1, %rsi
	movq	%rsp, %rdx

	xorl	%r8d,%r8d
	xorl	%r9d,%r9d

	# clone now
	call	do_fork
	movq	%rax,RAX(%rsp)
	xorl	%edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq	$0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq	%rdi, %rax
	movq	%rsi, %rdi
	call	*%rax
	# exit
	mov	%eax, %edi
	call	do_exit
	CFI_ENDPROC
ENDPROC(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	call	sys_execve
	movq	%rax, RAX(%rsp)
	RESTORE_REST
	testq	%rax,%rax
	je	int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq	$0
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq	$-1
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp	paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq	$0
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_int3, DEBUG_STACK
	jmp	paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
	zeroentry do_reserved
END(reserved)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp	paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp	paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq	$0
	CFI_ADJUST_CFA_OFFSET	8
	paranoidentry do_machine_check
	jmp	paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push	%rbp
	CFI_ADJUST_CFA_OFFSET	8
	CFI_REL_OFFSET	rbp,0
	mov	%rsp,%rbp
	CFI_DEF_CFA_REGISTER	rbp
	incl	%gs:pda_irqcount
	cmove	%gs:pda_irqstackptr,%rsp
	push	%rbp			# backlink for old unwinder
	call	__do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	-8
	decl	%gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov	$-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)