/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */
/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals, or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 * at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like a partial stack frame, but with all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 * backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 * There are unfortunately lots of special cases where some registers are
 * not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 * Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 * frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
.code64
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
swapgs
sysretq
#endif /* CONFIG_PARAVIRT */
.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON
1:
#endif
.endm
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
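/*
 * Background note: SYSCALL saves the user return address in %rcx and the
 * user rflags in %r11, and pushes nothing on the kernel stack, so the SS,
 * RSP, CS, EFLAGS and RCX slots of pt_regs do not reflect user state on the
 * fast path. FIXUP_TOP_OF_STACK reconstructs those slots (RSP from the PDA,
 * EFLAGS from the saved R11) and sets RCX to -1, since the user's %rcx value
 * has been lost.
 */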
/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
movq %gs:pda_oldrsp,\tmp
movq \tmp,RSP(%rsp)
movq $__USER_DS,SS(%rsp)
movq $__USER_CS,CS(%rsp)
movq $-1,RCX(%rsp)
movq R11(%rsp),\tmp /* get eflags */
movq \tmp,EFLAGS(%rsp)
.endm
.macro RESTORE_TOP_OF_STACK tmp,offset=0
movq RSP-\offset(%rsp),\tmp
movq \tmp,%gs:pda_oldrsp
movq EFLAGS-\offset(%rsp),\tmp
movq \tmp,R11-\offset(%rsp)
.endm
.macro FAKE_STACK_FRAME child_rip
/* push in order ss, rsp, eflags, cs, rip */
xorl %eax, %eax
pushq $__KERNEL_DS /* ss */
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET ss,0*/
pushq %rax /* rsp */
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rsp,0
pushq $(1<<9) /* eflags - interrupts on */
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET rflags,0*/
pushq $__KERNEL_CS /* cs */
CFI_ADJUST_CFA_OFFSET 8
/*CFI_REL_OFFSET cs,0*/
pushq \child_rip /* rip */
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rip,0
pushq %rax /* orig rax */
CFI_ADJUST_CFA_OFFSET 8
.endm
.macro UNFAKE_STACK_FRAME
addq $8*6, %rsp
CFI_ADJUST_CFA_OFFSET -(6*8)
.endm
.macro CFI_DEFAULT_STACK start=1
.if \start
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,SS+8
.else
CFI_DEF_CFA_OFFSET SS+8
.endif
CFI_REL_OFFSET r15,R15
CFI_REL_OFFSET r14,R14
CFI_REL_OFFSET r13,R13
CFI_REL_OFFSET r12,R12
CFI_REL_OFFSET rbp,RBP
CFI_REL_OFFSET rbx,RBX
CFI_REL_OFFSET r11,R11
CFI_REL_OFFSET r10,R10
CFI_REL_OFFSET r9,R9
CFI_REL_OFFSET r8,R8
CFI_REL_OFFSET rax,RAX
CFI_REL_OFFSET rcx,RCX
CFI_REL_OFFSET rdx,RDX
CFI_REL_OFFSET rsi,RSI
CFI_REL_OFFSET rdi,RDI
CFI_REL_OFFSET rip,RIP
/*CFI_REL_OFFSET cs,CS*/
/*CFI_REL_OFFSET rflags,EFLAGS*/
CFI_REL_OFFSET rsp,RSP
/*CFI_REL_OFFSET ss,SS*/
.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
CFI_DEFAULT_STACK
push kernel_eflags(%rip)
CFI_ADJUST_CFA_OFFSET 8 /* push of a 64-bit operand moves %rsp by 8 */
popf # reset kernel eflags
CFI_ADJUST_CFA_OFFSET -8
call schedule_tail
GET_THREAD_INFO(%rcx)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
jnz rff_trace
rff_action:
RESTORE_REST
testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
je int_ret_from_sys_call
testl $_TIF_IA32,TI_flags(%rcx)
jnz int_ret_from_sys_call
RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
jmp ret_from_sys_call
rff_trace:
movq %rsp,%rdi
call syscall_trace_leave
GET_THREAD_INFO(%rcx)
jmp rff_action
CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */
/*
 * Register setup:
 * rax system call number
 * rdi arg0
 * rcx return address for syscall/sysret, C arg3
 * rsi arg1
 * rdx arg2
 * r10 arg3 (--> moved to rcx for C)
 * r8 arg4
 * r9 arg5
 * r11 eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX if we had a free scratch register we could save the RSP into the stack
 * frame and report it properly in ps. Unfortunately we don't have one.
 *
 * When the user can change the frames, always force IRET. That is because
 * IRET deals with non-canonical addresses better; SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
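/*
 * Worked example of the convention above: a user-space write(fd, buf, count)
 * arrives here with rax = __NR_write, rdi = fd, rsi = buf, rdx = count.
 * A call with a fourth argument would pass it in r10, which the code below
 * moves into rcx before dispatching to the C handler.
 */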
ENTRY(system_call)
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,PDA_STACKOFFSET
CFI_REGISTER rip,rcx
/*CFI_REGISTER rflags,r11*/
SWAPGS_UNSAFE_STACK
/*
 * A hypervisor implementation might want to use a label
 * after the swapgs, so that it can do the swapgs
 * for the guest and jump here on syscall.
 */
ENTRY(system_call_after_swapgs)
movq %rsp,%gs:pda_oldrsp
movq %gs:pda_kernelstack,%rsp
/*
 * No need to follow this irqs off/on section - it's straight
 * and short:
 */
ENABLE_INTERRUPTS(CLBR_NONE)
SAVE_ARGS 8,1
movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
movq %rcx,RIP-ARGOFFSET(%rsp)
CFI_REL_OFFSET rip,RIP-ARGOFFSET
GET_THREAD_INFO(%rcx)
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP), \
TI_flags(%rcx)
jnz tracesys
cmpq $__NR_syscall_max,%rax
ja badsys
movq %r10,%rcx
call *sys_call_table(,%rax,8) # XXX: rip relative
movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
movl $_TIF_ALLWORK_MASK,%edi
/* edi: flagmask */
sysret_check:
LOCKDEP_SYS_EXIT
GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
movl TI_flags(%rcx),%edx
andl %edi,%edx
jnz sysret_careful
CFI_REMEMBER_STATE
/*
 * sysretq will re-enable interrupts:
 */
TRACE_IRQS_ON
movq RIP-ARGOFFSET(%rsp),%rcx
CFI_REGISTER rip,rcx
RESTORE_ARGS 0,-ARG_SKIP,1
/*CFI_REGISTER rflags,r11*/
movq %gs:pda_oldrsp, %rsp
USERGS_SYSRET64
CFI_RESTORE_STATE
/* Handle reschedules */
/* edx: work, edi: workmask */
sysret_careful:
bt $TIF_NEED_RESCHED,%edx
jnc sysret_signal
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq %rdi
CFI_ADJUST_CFA_OFFSET 8
call schedule
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
jmp sysret_check
/* Handle a signal */
sysret_signal:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
testl $_TIF_DO_NOTIFY_MASK,%edx
jz 1f
/* Really a signal */
/* edx: work flags (arg3) */
leaq do_notify_resume(%rip),%rax
leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
xorl %esi,%esi # oldset -> arg2
call ptregscall_common
1: movl $_TIF_NEED_RESCHED,%edi
/* Use IRET because the user could have changed the frame. This
works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
jmp int_with_check
badsys:
movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
jmp ret_from_sys_call
/* Do syscall tracing */
tracesys:
SAVE_REST
movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
FIXUP_TOP_OF_STACK %rdi
movq %rsp,%rdi
call syscall_trace_enter
LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
RESTORE_REST
cmpq $__NR_syscall_max,%rax
ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
movq %r10,%rcx /* fixup for C */
call *sys_call_table(,%rax,8)
movq %rax,RAX-ARGOFFSET(%rsp)
/* Use IRET because the user could have changed the frame */
/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
.globl int_ret_from_sys_call
int_ret_from_sys_call:
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
testl $3,CS-ARGOFFSET(%rsp)
je retint_restore_args
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
int_with_check:
LOCKDEP_SYS_EXIT_IRQ
GET_THREAD_INFO(%rcx)
movl TI_flags(%rcx),%edx
andl %edi,%edx
jnz int_careful
andl $~TS_COMPAT,TI_status(%rcx)
jmp retint_swapgs
/* Either reschedule or signal or syscall exit tracking needed. */
/* First do a reschedule test. */
/* edx: work, edi: workmask */
int_careful:
bt $TIF_NEED_RESCHED,%edx
jnc int_very_careful
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq %rdi
CFI_ADJUST_CFA_OFFSET 8
call schedule
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
jmp int_with_check
/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
SAVE_REST
/* Check for syscall exit trace */
testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
jz int_signal
pushq %rdi
CFI_ADJUST_CFA_OFFSET 8
leaq 8(%rsp),%rdi # &ptregs -> arg1
call syscall_trace_leave
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
jmp int_restore_rest
int_signal:
testl $_TIF_DO_NOTIFY_MASK,%edx
jz 1f
movq %rsp,%rdi # &ptregs -> arg1
xorl %esi,%esi # oldset -> arg2
call do_notify_resume
1: movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
RESTORE_REST
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
jmp int_with_check
CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls need to save a complete full stack frame.
 */
.macro PTREGSCALL label,func,arg
.globl \label
\label:
leaq \func(%rip),%rax
leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
jmp ptregscall_common
END(\label)
.endm
CFI_STARTPROC
PTREGSCALL stub_clone, sys_clone, %r8
PTREGSCALL stub_fork, sys_fork, %rdi
PTREGSCALL stub_vfork, sys_vfork, %rdi
PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
PTREGSCALL stub_iopl, sys_iopl, %rsi
ENTRY(ptregscall_common)
popq %r11
CFI_ADJUST_CFA_OFFSET -8
CFI_REGISTER rip, r11
SAVE_REST
movq %r11, %r15
CFI_REGISTER rip, r15
FIXUP_TOP_OF_STACK %r11
call *%rax
RESTORE_TOP_OF_STACK %r11
movq %r15, %r11
CFI_REGISTER rip, r11
RESTORE_REST
pushq %r11
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rip, 0
ret
CFI_ENDPROC
END(ptregscall_common)
ENTRY(stub_execve)
CFI_STARTPROC
popq %r11
CFI_ADJUST_CFA_OFFSET -8
CFI_REGISTER rip, r11
SAVE_REST
FIXUP_TOP_OF_STACK %r11
movq %rsp, %rcx
call sys_execve
RESTORE_TOP_OF_STACK %r11
movq %rax,RAX(%rsp)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_execve)
/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
CFI_STARTPROC
addq $8, %rsp
CFI_ADJUST_CFA_OFFSET -8
SAVE_REST
movq %rsp,%rdi
FIXUP_TOP_OF_STACK %r11
call sys_rt_sigreturn
movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
.macro _frame ref
CFI_STARTPROC simple
CFI_SIGNAL_FRAME
CFI_DEF_CFA rsp,SS+8-\ref
/*CFI_REL_OFFSET ss,SS-\ref*/
CFI_REL_OFFSET rsp,RSP-\ref
/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
/*CFI_REL_OFFSET cs,CS-\ref*/
CFI_REL_OFFSET rip,RIP-\ref
.endm
/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX
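/*
 * Note on the \ref argument: it names the pt_regs slot that is currently on
 * top of the stack, so CFI_DEF_CFA rsp,SS+8-\ref always places the CFA just
 * above the hardware SS slot. With no error code the top slot is RIP; with
 * an error code (or a pushed vector) it is ORIG_RAX.
 */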
/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */
/* 0(%rsp): interrupt number */
.macro interrupt func
cld
SAVE_ARGS
leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
pushq %rbp
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rbp, 0
movq %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
testl $3,CS(%rdi)
je 1f
SWAPGS
/* irqcount is used to check if a CPU is already on an interrupt
stack or not. While this is essentially redundant with preempt_count
it is a little cheaper to use a separate counter in the PDA
(short of moving irq_enter into assembly, which would be too
much work) */
1: incl %gs:pda_irqcount
cmoveq %gs:pda_irqstackptr,%rsp
push %rbp # backlink for old unwinder
/*
 * We entered an interrupt context - irqs are off:
 */
TRACE_IRQS_OFF
call \func
.endm
ENTRY(common_interrupt)
XCPT_FRAME
interrupt do_IRQ
/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
decl %gs:pda_irqcount
leaveq
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
exit_intr:
GET_THREAD_INFO(%rcx)
testl $3,CS-ARGOFFSET(%rsp)
je retint_kernel
/* Interrupt came from user space */
/*
 * Has a correct top of stack, but a partial stack frame
 * %rcx: thread info. Interrupts off.
 */
retint_with_reschedule:
movl $_TIF_WORK_MASK,%edi
retint_check:
LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx
andl %edi,%edx
CFI_REMEMBER_STATE
jnz retint_careful
retint_swapgs: /* return to user-space */
/*
 * The iretq could re-enable interrupts:
 */
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_IRETQ
SWAPGS
jmp restore_args
retint_restore_args: /* return to kernel space */
DISABLE_INTERRUPTS(CLBR_ANY)
/*
 * The iretq could re-enable interrupts:
 */
TRACE_IRQS_IRETQ
restore_args:
RESTORE_ARGS 0,8,0
irq_return:
INTERRUPT_RETURN
.section __ex_table, "a"
.quad irq_return, bad_iret
.previous
#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
iretq
.section __ex_table,"a"
.quad native_iret, bad_iret
.previous
#endif
.section .fixup,"ax"
bad_iret:
/*
 * The iret traps when the %cs or %ss being restored is bogus.
 * We've lost the original trap vector and error code.
 * #GPF is the most likely one to get for an invalid selector.
 * So pretend we completed the iret and took the #GPF in user mode.
 *
 * We are now running with the kernel GS after exception recovery.
 * But error_entry expects us to have user GS to match the user %cs,
 * so swap back.
 */
pushq $0
SWAPGS
jmp general_protection
.previous
/* edi: workmask, edx: work */
retint_careful:
CFI_RESTORE_STATE
bt $TIF_NEED_RESCHED,%edx
jnc retint_signal
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq %rdi
CFI_ADJUST_CFA_OFFSET 8
call schedule
popq %rdi
CFI_ADJUST_CFA_OFFSET -8
GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
jmp retint_check
retint_signal:
testl $_TIF_DO_NOTIFY_MASK,%edx
jz retint_swapgs
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
SAVE_REST
movq $-1,ORIG_RAX(%rsp)
xorl %esi,%esi # oldset
movq %rsp,%rdi # &pt_regs
call do_notify_resume
RESTORE_REST
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
movl $_TIF_NEED_RESCHED,%edi
GET_THREAD_INFO(%rcx)
jmp retint_check
#ifdef CONFIG_PREEMPT
/* Returning to kernel space. Check if we need preemption */
/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
cmpl $0,TI_preempt_count(%rcx)
jnz retint_restore_args
bt $TIF_NEED_RESCHED,TI_flags(%rcx)
jnc retint_restore_args
bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
jnc retint_restore_args
call preempt_schedule_irq
jmp exit_intr
#endif
CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
.macro apicinterrupt num,func
INTR_FRAME
pushq $~(\num)
CFI_ADJUST_CFA_OFFSET 8
interrupt \func
jmp ret_from_intr
CFI_ENDPROC
.endm
ENTRY(thermal_interrupt)
apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)
ENTRY(threshold_interrupt)
apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)
#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)
.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
.endm
INVALIDATE_ENTRY 0
INVALIDATE_ENTRY 1
INVALIDATE_ENTRY 2
INVALIDATE_ENTRY 3
INVALIDATE_ENTRY 4
INVALIDATE_ENTRY 5
INVALIDATE_ENTRY 6
INVALIDATE_ENTRY 7
ENTRY(call_function_interrupt)
apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(irq_move_cleanup_interrupt)
apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif
ENTRY(apic_timer_interrupt)
apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)
ENTRY(uv_bau_message_intr1)
apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)
ENTRY(error_interrupt)
apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)
ENTRY(spurious_interrupt)
apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
.macro zeroentry sym
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq $0 /* push error code/oldrax */
CFI_ADJUST_CFA_OFFSET 8
pushq %rax /* push real oldrax to the rdi slot */
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rax,0
leaq \sym(%rip),%rax
jmp error_entry
CFI_ENDPROC
.endm
.macro errorentry sym
XCPT_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq %rax
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rax,0
leaq \sym(%rip),%rax
jmp error_entry
CFI_ENDPROC
.endm
/* error code is on the stack already */
/* handle NMI-like exceptions that can happen everywhere */
.macro paranoidentry sym, ist=0, irqtrace=1
SAVE_ALL
cld
movl $1,%ebx
movl $MSR_GS_BASE,%ecx
rdmsr
testl %edx,%edx
js 1f
SWAPGS
xorl %ebx,%ebx
1:
.if \ist
movq %gs:pda_data_offset, %rbp
.endif
movq %rsp,%rdi
movq ORIG_RAX(%rsp),%rsi
movq $-1,ORIG_RAX(%rsp)
.if \ist
subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
.endif
call \sym
.if \ist
addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
.endif
DISABLE_INTERRUPTS(CLBR_NONE)
.if \irqtrace
TRACE_IRQS_OFF
.endif
.endm
/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * the NMI case should be common, and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (We cannot change the soft and
 * hard flags at once, atomically.)
 */
.macro paranoidexit trace=1
/* ebx: no swapgs flag */
paranoid_exit\trace:
testl %ebx,%ebx /* swapgs needed? */
jnz paranoid_restore\trace
testl $3,CS(%rsp)
jnz paranoid_userspace\trace
paranoid_swapgs\trace:
.if \trace
TRACE_IRQS_IRETQ 0
.endif
SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
RESTORE_ALL 8
jmp irq_return
paranoid_userspace\trace:
GET_THREAD_INFO(%rcx)
movl TI_flags(%rcx),%ebx
andl $_TIF_WORK_MASK,%ebx
jz paranoid_swapgs\trace
movq %rsp,%rdi /* &pt_regs */
call sync_regs
movq %rax,%rsp /* switch stack for scheduling */
testl $_TIF_NEED_RESCHED,%ebx
jnz paranoid_schedule\trace
movl %ebx,%edx /* arg3: thread flags */
.if \trace
TRACE_IRQS_ON
.endif
ENABLE_INTERRUPTS(CLBR_NONE)
xorl %esi,%esi /* arg2: oldset */
movq %rsp,%rdi /* arg1: &pt_regs */
call do_notify_resume
DISABLE_INTERRUPTS(CLBR_NONE)
.if \trace
TRACE_IRQS_OFF
.endif
jmp paranoid_userspace\trace
paranoid_schedule\trace:
.if \trace
TRACE_IRQS_ON
.endif
ENABLE_INTERRUPTS(CLBR_ANY)
call schedule
DISABLE_INTERRUPTS(CLBR_ANY)
.if \trace
TRACE_IRQS_OFF
.endif
jmp paranoid_userspace\trace
CFI_ENDPROC
.endm
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
_frame RDI
CFI_REL_OFFSET rax,0
/* rdi slot contains rax, oldrax contains error code */
cld
subq $14*8,%rsp
CFI_ADJUST_CFA_OFFSET (14*8)
movq %rsi,13*8(%rsp)
CFI_REL_OFFSET rsi,RSI
movq 14*8(%rsp),%rsi /* load rax from rdi slot */
CFI_REGISTER rax,rsi
movq %rdx,12*8(%rsp)
CFI_REL_OFFSET rdx,RDX
movq %rcx,11*8(%rsp)
CFI_REL_OFFSET rcx,RCX
movq %rsi,10*8(%rsp) /* store rax */
CFI_REL_OFFSET rax,RAX
movq %r8, 9*8(%rsp)
CFI_REL_OFFSET r8,R8
movq %r9, 8*8(%rsp)
CFI_REL_OFFSET r9,R9
movq %r10,7*8(%rsp)
CFI_REL_OFFSET r10,R10
movq %r11,6*8(%rsp)
CFI_REL_OFFSET r11,R11
movq %rbx,5*8(%rsp)
CFI_REL_OFFSET rbx,RBX
movq %rbp,4*8(%rsp)
CFI_REL_OFFSET rbp,RBP
movq %r12,3*8(%rsp)
CFI_REL_OFFSET r12,R12
movq %r13,2*8(%rsp)
CFI_REL_OFFSET r13,R13
movq %r14,1*8(%rsp)
CFI_REL_OFFSET r14,R14
movq %r15,(%rsp)
CFI_REL_OFFSET r15,R15
xorl %ebx,%ebx
testl $3,CS(%rsp)
je error_kernelspace
error_swapgs:
SWAPGS
error_sti:
movq %rdi,RDI(%rsp)
CFI_REL_OFFSET rdi,RDI
movq %rsp,%rdi
movq ORIG_RAX(%rsp),%rsi /* get error code */
movq $-1,ORIG_RAX(%rsp)
call *%rax
/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
movl %ebx,%eax
RESTORE_REST
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
GET_THREAD_INFO(%rcx)
testl %eax,%eax
jne retint_kernel
LOCKDEP_SYS_EXIT_IRQ
movl TI_flags(%rcx),%edx
movl $_TIF_WORK_MASK,%edi
andl %edi,%edx
jnz retint_careful
jmp retint_swapgs
CFI_ENDPROC
error_kernelspace:
incl %ebx
/* There are two places in the kernel that can potentially fault with
usergs. Handle them here. The exception handlers after
iret run with kernel gs again, so don't set the user space flag.
B stepping K8s sometimes report a truncated RIP for IRET
exceptions returning to compat mode. Check for these here too. */
leaq irq_return(%rip),%rcx
cmpq %rcx,RIP(%rsp)
je error_swapgs
movl %ecx,%ecx /* zero extend */
cmpq %rcx,RIP(%rsp)
je error_swapgs
cmpq $gs_change,RIP(%rsp)
je error_swapgs
jmp error_sti
KPROBE_END(error_entry)
/* Reload gs selector with exception handling */
/* edi: new selector */
ENTRY(native_load_gs_index)
CFI_STARTPROC
pushf
CFI_ADJUST_CFA_OFFSET 8
DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
SWAPGS
gs_change:
movl %edi,%gs
2: mfence /* workaround */
SWAPGS
popf
CFI_ADJUST_CFA_OFFSET -8
ret
CFI_ENDPROC
ENDPROC(native_load_gs_index)
.section __ex_table,"a"
.align 8
.quad gs_change,bad_gs
.previous
.section .fixup,"ax"
/* running with kernelgs */
bad_gs:
SWAPGS /* switch back to user gs */
xorl %eax,%eax
movl %eax,%gs
jmp 2b
.previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 * rdi: fn, rsi: arg, rdx: flags
 */
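/*
 * A hypothetical caller (illustrative only, my_thread_fn is a placeholder):
 *
 * kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * The flags argument is OR'ed with kernel_thread_flags below before do_fork
 * is called.
 */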
ENTRY(kernel_thread)
CFI_STARTPROC
FAKE_STACK_FRAME $child_rip
SAVE_ALL
# rdi: flags, rsi: usp, rdx: will be &pt_regs
movq %rdx,%rdi
orq kernel_thread_flags(%rip),%rdi
movq $-1, %rsi
movq %rsp, %rdx
xorl %r8d,%r8d
xorl %r9d,%r9d
# clone now
call do_fork
movq %rax,RAX(%rsp)
xorl %edi,%edi
/*
 * It isn't worth checking for a reschedule here, so within the x86_64
 * port you can rely on kernel_thread() not rescheduling the child before
 * returning; this avoids the need for hacks, for example when forking off
 * the per-CPU idle tasks.
 * [Hopefully no generic code relies on the reschedule -AK]
 */
RESTORE_ALL
UNFAKE_STACK_FRAME
ret
CFI_ENDPROC
ENDPROC(kernel_thread)
child_rip:
pushq $0 # fake return address
CFI_STARTPROC
/*
 * Here we are in the child and the registers are set as they were
 * at kernel_thread() invocation in the parent.
 */
movq %rdi, %rax
movq %rsi, %rdi
call *%rax
# exit
mov %eax, %edi
call do_exit
CFI_ENDPROC
ENDPROC(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 * extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 * rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 * extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
CFI_STARTPROC
FAKE_STACK_FRAME $0
SAVE_ALL
movq %rsp,%rcx
call sys_execve
movq %rax, RAX(%rsp)
RESTORE_REST
testq %rax,%rax
je int_ret_from_sys_call
RESTORE_ARGS
UNFAKE_STACK_FRAME
ret
CFI_ENDPROC
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
errorentry do_page_fault
KPROBE_END(page_fault)
ENTRY(coprocessor_error)
zeroentry do_coprocessor_error
END(coprocessor_error)
ENTRY(simd_coprocessor_error)
zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)
ENTRY(device_not_available)
zeroentry math_state_restore
END(device_not_available)
/* runs on exception stack */
KPROBE_ENTRY(debug)
INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_debug, DEBUG_STACK
paranoidexit
KPROBE_END(debug)
/* runs on exception stack */
KPROBE_ENTRY(nmi)
INTR_FRAME
pushq $-1
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
paranoidexit 0
#else
jmp paranoid_exit1
CFI_ENDPROC
#endif
KPROBE_END(nmi)
KPROBE_ENTRY(int3)
INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_int3, DEBUG_STACK
jmp paranoid_exit1
CFI_ENDPROC
KPROBE_END(int3)
ENTRY(overflow)
zeroentry do_overflow
END(overflow)
ENTRY(bounds)
zeroentry do_bounds
END(bounds)
ENTRY(invalid_op)
zeroentry do_invalid_op
END(invalid_op)
ENTRY(coprocessor_segment_overrun)
zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)
/* runs on exception stack */
ENTRY(double_fault)
XCPT_FRAME
paranoidentry do_double_fault
jmp paranoid_exit1
CFI_ENDPROC
END(double_fault)
ENTRY(invalid_TSS)
errorentry do_invalid_TSS
END(invalid_TSS)
ENTRY(segment_not_present)
errorentry do_segment_not_present
END(segment_not_present)
/* runs on exception stack */
ENTRY(stack_segment)
XCPT_FRAME
paranoidentry do_stack_segment
jmp paranoid_exit1
CFI_ENDPROC
END(stack_segment)
KPROBE_ENTRY(general_protection)
errorentry do_general_protection
KPROBE_END(general_protection)
ENTRY(alignment_check)
errorentry do_alignment_check
END(alignment_check)
ENTRY(divide_error)
zeroentry do_divide_error
END(divide_error)
ENTRY(spurious_interrupt_bug)
zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)
#ifdef CONFIG_X86_MCE
/* runs on exception stack */
ENTRY(machine_check)
INTR_FRAME
pushq $0
CFI_ADJUST_CFA_OFFSET 8
paranoidentry do_machine_check
jmp paranoid_exit1
CFI_ENDPROC
END(machine_check)
#endif
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
CFI_STARTPROC
push %rbp
CFI_ADJUST_CFA_OFFSET 8
CFI_REL_OFFSET rbp,0
mov %rsp,%rbp
CFI_DEF_CFA_REGISTER rbp
incl %gs:pda_irqcount
cmove %gs:pda_irqstackptr,%rsp
push %rbp # backlink for old unwinder
call __do_softirq
leaveq
CFI_DEF_CFA_REGISTER rsp
CFI_ADJUST_CFA_OFFSET -8
decl %gs:pda_irqcount
ret
CFI_ENDPROC
ENDPROC(call_softirq)
KPROBE_ENTRY(ignore_sysret)
CFI_STARTPROC
mov $-ENOSYS,%eax
sysret
CFI_ENDPROC
ENDPROC(ignore_sysret)