/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like a partial stack frame, but with all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>

	.code64

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif
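
/*
 * native_usergs_sysret64 below is the non-paravirt implementation behind
 * USERGS_SYSRET64, used at the very end of the SYSRET fast path: switch
 * back to the user GS base with swapgs, then return with sysretq (which
 * reloads RIP from %rcx and RFLAGS from %r11).
 */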
#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc 1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp:at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP(%rsp)
	movq $__USER_DS,SS(%rsp)
	movq $__USER_CS,CS(%rsp)
	movq $-1,RCX(%rsp)
	movq R11(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq RSP-\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS-\offset(%rsp),\tmp
	movq \tmp,R11-\offset(%rsp)
.endm
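
/*
 * Rough picture of what FIXUP_TOP_OF_STACK reconstructs: SYSCALL pushes
 * nothing, so the SS/RSP/CS/EFLAGS slots of pt_regs hold garbage on the fast
 * path. The macro fills them from what the fast path did stash away - the
 * user RSP from pda_oldrsp, the flat __USER_DS/__USER_CS selectors, and
 * EFLAGS from the R11 slot (SYSCALL saved the original RFLAGS in %r11).
 * RCX is stored as -1 since its user value was already clobbered by SYSCALL,
 * which uses it for the return RIP.
 */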

.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq $__KERNEL_DS	/* ss */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET ss,0*/
	pushq %rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rsp,0
	pushq $(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip,0
	pushq %rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET 8
.endm
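
/*
 * FAKE_STACK_FRAME builds six quadwords (ss, rsp, eflags, cs, rip, orig_rax)
 * that look like a hardware interrupt frame plus orig_rax, which is why
 * UNFAKE_STACK_FRAME below drops exactly 6*8 bytes again.
 */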

.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET -(6*8)
.endm

.macro CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET r15,R15
	CFI_REL_OFFSET r14,R14
	CFI_REL_OFFSET r13,R13
	CFI_REL_OFFSET r12,R12
	CFI_REL_OFFSET rbp,RBP
	CFI_REL_OFFSET rbx,RBX
	CFI_REL_OFFSET r11,R11
	CFI_REL_OFFSET r10,R10
	CFI_REL_OFFSET r9,R9
	CFI_REL_OFFSET r8,R8
	CFI_REL_OFFSET rax,RAX
	CFI_REL_OFFSET rcx,RCX
	CFI_REL_OFFSET rdx,RDX
	CFI_REL_OFFSET rsi,RSI
	CFI_REL_OFFSET rdi,RDI
	CFI_REL_OFFSET rip,RIP
	/*CFI_REL_OFFSET cs,CS*/
	/*CFI_REL_OFFSET rflags,EFLAGS*/
	CFI_REL_OFFSET rsp,RSP
	/*CFI_REL_OFFSET ss,SS*/
.endm

/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,threadinfo_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)

/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack
 *	frame and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
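
/*
 * Illustrative only (not part of this file): a user-space getpid() that
 * would arrive here looks roughly like
 *
 *	movl $39,%eax		# __NR_getpid on x86-64
 *	syscall			# CPU stashes RIP in %rcx, RFLAGS in %r11,
 *				# and jumps to the LSTAR target (system_call)
 */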

ENTRY(system_call)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,PDA_STACKOFFSET
	CFI_REGISTER rip,rcx
	/*CFI_REGISTER rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)
	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	movq %gs:pda_oldrsp, %rsp
	USERGS_SYSRET64

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx: work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f

	/* Really a signal */
	/* edx: work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	/* Use IRET because the user could have changed the frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)		/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET		/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi: mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz int_careful
	andl $~TS_COMPAT,threadinfo_status(%rcx)
	jmp retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx: work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)

/*
 * Certain special system calls that need to save a complete stack frame.
 */
.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq \func(%rip),%rax
	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp ptregscall_common
END(\label)
.endm
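
/*
 * For example, "PTREGSCALL stub_clone, sys_clone, %r8" expands to roughly:
 *
 * stub_clone:
 *	leaq sys_clone(%rip),%rax
 *	leaq -ARGOFFSET+8(%rsp),%r8	# pt_regs pointer as sys_clone's last arg
 *	jmp ptregscall_common
 *
 * ptregscall_common then saves the remaining registers, fixes up the top of
 * stack and does "call *%rax".
 */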

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): interrupt number */
.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl %gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
.endm
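
/*
 * Note on the stack switch above: pda_irqcount idles at -1 (an assumption
 * about the PDA setup in C code, mirrored by call_softirq at the bottom of
 * this file), so incl sets ZF only for the outermost interrupt; cmoveq then
 * switches %rsp to the per-CPU interrupt stack, while nested interrupts stay
 * on the stack they are already running on.
 */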

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0
	SWAPGS
	jmp general_protection
	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt $TIF_NEED_RESCHED,%edx
	jnc retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
.endm
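
/*
 * The vector is pushed as its one's complement (~num) into the orig_rax
 * slot, presumably so the value stays negative and cannot be confused with
 * a syscall number; the C side (do_IRQ and friends, outside this file)
 * recovers the vector by complementing it again.
 */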

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)

ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(uv_bau_message_intr1)
	apicinterrupt 220,uv_bau_message_interrupt
END(uv_bau_message_intr1)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)

/*
 * Exception entry points.
 */
.macro zeroentry sym
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $0		/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax		/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
.endm

.macro errorentry sym
	XCPT_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f
	SWAPGS
	xorl %ebx,%ebx
1:
	.if \ist
	movq %gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
.endm
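
/*
 * How paranoidentry decides on swapgs: it reads MSR_GS_BASE and tests the
 * sign bit of the high half. A kernel GS base is a negative (kernel-space)
 * address, so a set sign bit means swapgs has already happened and %ebx
 * stays 1; otherwise it does SWAPGS itself and clears %ebx so the exit path
 * knows to swap back. For \ist != 0 the IST slot in the TSS is moved down by
 * EXCEPTION_STKSZ around the handler call, so a nested exception of the same
 * kind (e.g. a breakpoint hit inside do_debug) lands on a fresh stack
 * instead of clobbering the one currently in use.
 */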

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
.macro paranoidexit trace=1
	/* ebx: no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
.endm
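
/*
 * The \trace suffix on every label lets this macro be expanded once per
 * trace value without duplicate symbols: the debug handler's plain
 * "paranoidexit" below instantiates paranoid_exit1, which the other
 * paranoid exception stubs jump into, while the NMI path uses the trace=0
 * expansion when irq-flag tracing is configured in.
 */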

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET (14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl threadinfo_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC

error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rcx
	cmpq %rcx,RIP(%rsp)
	je error_swapgs
	movl %ecx,%ecx		/* zero extend */
	cmpq %rcx,RIP(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)

	/* Reload gs selector with exception handling */
	/* edi: new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence			/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq  kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi
	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov  %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

	/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov  $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)
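
/*
 * ignore_sysret just reports -ENOSYS and returns with sysret. The wiring is
 * done from C code, not here; the usual arrangement (an assumption about the
 * rest of the kernel, not something this file defines) is to install it as
 * the MSR_CSTAR target so a 32-bit SYSCALL gets a clean failure when
 * CONFIG_IA32_EMULATION is not built in.
 */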