
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */
/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: Architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: Like partial stack frame, but all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>

	.code64

#ifdef CONFIG_FTRACE
ENTRY(mcount)
	cmpq $ftrace_stub, ftrace_trace_function
	jnz trace
.globl ftrace_stub
ftrace_stub:
	retq
trace:
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)

	movq 0x38(%rsp), %rdi
	movq 8(%rbp), %rsi

	call *ftrace_trace_function

	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp

	jmp ftrace_stub
END(mcount)
#endif

#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_irq_enable_syscall_ret)
	movq %gs:pda_oldrsp,%rsp
	swapgs
	sysretq
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
	bt $9,EFLAGS-\offset(%rsp)	/* interrupts off? */
	jnc 1f
	TRACE_IRQS_ON
1:
#endif
.endm
/*
 * C code is not supposed to know about undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL based
 * fast path FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */
/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
	movq %gs:pda_oldrsp,\tmp
	movq \tmp,RSP(%rsp)
	movq $__USER_DS,SS(%rsp)
	movq $__USER_CS,CS(%rsp)
	movq $-1,RCX(%rsp)
	movq R11(%rsp),\tmp	/* get eflags */
	movq \tmp,EFLAGS(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp,offset=0
	movq RSP-\offset(%rsp),\tmp
	movq \tmp,%gs:pda_oldrsp
	movq EFLAGS-\offset(%rsp),\tmp
	movq \tmp,R11-\offset(%rsp)
.endm

.macro FAKE_STACK_FRAME child_rip
	/* push in order ss, rsp, eflags, cs, rip */
	xorl %eax, %eax
	pushq %rax		/* ss */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET ss,0*/
	pushq %rax		/* rsp */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rsp,0
	pushq $(1<<9)		/* eflags - interrupts on */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET rflags,0*/
	pushq $__KERNEL_CS	/* cs */
	CFI_ADJUST_CFA_OFFSET 8
	/*CFI_REL_OFFSET cs,0*/
	pushq \child_rip	/* rip */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip,0
	pushq %rax		/* orig rax */
	CFI_ADJUST_CFA_OFFSET 8
.endm

.macro UNFAKE_STACK_FRAME
	addq $8*6, %rsp
	CFI_ADJUST_CFA_OFFSET -(6*8)
.endm

.macro CFI_DEFAULT_STACK start=1
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8
	.else
	CFI_DEF_CFA_OFFSET SS+8
	.endif
	CFI_REL_OFFSET r15,R15
	CFI_REL_OFFSET r14,R14
	CFI_REL_OFFSET r13,R13
	CFI_REL_OFFSET r12,R12
	CFI_REL_OFFSET rbp,RBP
	CFI_REL_OFFSET rbx,RBX
	CFI_REL_OFFSET r11,R11
	CFI_REL_OFFSET r10,R10
	CFI_REL_OFFSET r9,R9
	CFI_REL_OFFSET r8,R8
	CFI_REL_OFFSET rax,RAX
	CFI_REL_OFFSET rcx,RCX
	CFI_REL_OFFSET rdx,RDX
	CFI_REL_OFFSET rsi,RSI
	CFI_REL_OFFSET rdi,RDI
	CFI_REL_OFFSET rip,RIP
	/*CFI_REL_OFFSET cs,CS*/
	/*CFI_REL_OFFSET rflags,EFLAGS*/
	CFI_REL_OFFSET rsp,RSP
	/*CFI_REL_OFFSET ss,SS*/
.endm

/*
 * A newly forked process directly context switches into this.
 */
/* rdi:	prev */
ENTRY(ret_from_fork)
	CFI_DEFAULT_STACK
	push kernel_eflags(%rip)
	CFI_ADJUST_CFA_OFFSET 8
	popf				# reset kernel eflags
	CFI_ADJUST_CFA_OFFSET -8
	call schedule_tail
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
	jnz rff_trace
rff_action:
	RESTORE_REST
	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
	je int_ret_from_sys_call
	testl $_TIF_IA32,threadinfo_flags(%rcx)
	jnz int_ret_from_sys_call
	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
	jmp ret_from_sys_call
rff_trace:
	movq %rsp,%rdi
	call syscall_trace_leave
	GET_THREAD_INFO(%rcx)
	jmp rff_action
	CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3	(--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15,rbp,rbx saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX	if we had a free scratch register we could save the RSP into the stack frame
 *	and report it properly in ps. Unfortunately we haven't.
 *
 * When the user can change the frames, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
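/*
 * Illustrative only (not part of the original file): a user-space caller
 * that follows the convention above, e.g. glibc's generic syscall(2)
 * wrapper, ends up doing roughly
 *
 *	syscall(__NR_write, fd, buf, count);
 *
 * which loads __NR_write into rax and fd/buf/count into rdi/rsi/rdx and
 * executes SYSCALL; the CPU stores the return RIP in rcx and RFLAGS in
 * r11 before control arrives here with interrupts off.
 */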
ENTRY(system_call)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,PDA_STACKOFFSET
	CFI_REGISTER rip,rcx
	/*CFI_REGISTER rflags,r11*/
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
ENTRY(system_call_after_swapgs)

	movq %rsp,%gs:pda_oldrsp
	movq %gs:pda_kernelstack,%rsp
	/*
	 * No need to follow this irqs off/on section - it's straight
	 * and short:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_ARGS 8,1
	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
	movq %rcx,RIP-ARGOFFSET(%rsp)
	CFI_REL_OFFSET rip,RIP-ARGOFFSET
	GET_THREAD_INFO(%rcx)
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
	jnz tracesys
	cmpq $__NR_syscall_max,%rax
	ja badsys
	movq %r10,%rcx
	call *sys_call_table(,%rax,8)	# XXX: rip relative
	movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	flagmask */
sysret_check:
	LOCKDEP_SYS_EXIT
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz sysret_careful
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	movq RIP-ARGOFFSET(%rsp),%rcx
	CFI_REGISTER rip,rcx
	RESTORE_ARGS 0,-ARG_SKIP,1
	/*CFI_REGISTER rflags,r11*/
	ENABLE_INTERRUPTS_SYSCALL_RET

	CFI_RESTORE_STATE
	/* Handle reschedules */
	/* edx:	work, edi: workmask */
sysret_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc sysret_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	jmp sysret_check

	/* Handle a signal */
sysret_signal:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f

	/* Really a signal */
	/* edx:	work flags (arg3) */
	leaq do_notify_resume(%rip),%rax
	leaq -ARGOFFSET(%rsp),%rdi	# &pt_regs -> arg1
	xorl %esi,%esi			# oldset -> arg2
	call ptregscall_common
1:	movl $_TIF_NEED_RESCHED,%edi
	/* Use IRET because user could have changed frame. This
	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

badsys:
	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
	jmp ret_from_sys_call

	/* Do syscall tracing */
tracesys:
	SAVE_REST
	movq $-ENOSYS,RAX(%rsp)		/* ptrace can change this for a bad syscall */
	FIXUP_TOP_OF_STACK %rdi
	movq %rsp,%rdi
	call syscall_trace_enter
	LOAD_ARGS ARGOFFSET		/* reload args from stack in case ptrace changed it */
	RESTORE_REST
	cmpq $__NR_syscall_max,%rax
	ja int_ret_from_sys_call	/* RAX(%rsp) set to -ENOSYS above */
	movq %r10,%rcx			/* fixup for C */
	call *sys_call_table(,%rax,8)
	movq %rax,RAX-ARGOFFSET(%rsp)
	/* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
	.globl int_ret_from_sys_call
int_ret_from_sys_call:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_restore_args
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
int_with_check:
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	jnz int_careful
	andl $~TS_COMPAT,threadinfo_status(%rcx)
	jmp retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt $TIF_NEED_RESCHED,%edx
	jnc int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check

	/* handle signals and tracing -- both require a full stack frame */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	/* Check for syscall exit trace */
	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
	jz int_signal
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	leaq 8(%rsp),%rdi	# &ptregs -> arg1
	call syscall_trace_leave
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
	jmp int_restore_rest

int_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz 1f
	movq %rsp,%rdi		# &ptregs -> arg1
	xorl %esi,%esi		# oldset -> arg2
	call do_notify_resume
1:	movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp int_with_check
	CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls that need to save a complete full stack frame.
 */
.macro PTREGSCALL label,func,arg
	.globl \label
\label:
	leaq \func(%rip),%rax
	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
	jmp ptregscall_common
END(\label)
.endm

	CFI_STARTPROC

	PTREGSCALL stub_clone, sys_clone, %r8
	PTREGSCALL stub_fork, sys_fork, %rdi
	PTREGSCALL stub_vfork, sys_vfork, %rdi
	PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
	PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
	PTREGSCALL stub_iopl, sys_iopl, %rsi

ENTRY(ptregscall_common)
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	movq %r11, %r15
	CFI_REGISTER rip, r15
	FIXUP_TOP_OF_STACK %r11
	call *%rax
	RESTORE_TOP_OF_STACK %r11
	movq %r15, %r11
	CFI_REGISTER rip, r11
	RESTORE_REST
	pushq %r11
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rip, 0
	ret
	CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
	CFI_STARTPROC
	popq %r11
	CFI_ADJUST_CFA_OFFSET -8
	CFI_REGISTER rip, r11
	SAVE_REST
	FIXUP_TOP_OF_STACK %r11
	movq %rsp, %rcx
	call sys_execve
	RESTORE_TOP_OF_STACK %r11
	movq %rax,RAX(%rsp)
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq $8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	SAVE_REST
	movq %rsp,%rdi
	FIXUP_TOP_OF_STACK %r11
	call sys_rt_sigreturn
	movq %rax,RAX(%rsp)	# fixme, this could be done at the higher layer
	RESTORE_REST
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

/*
 * initial frame state for interrupts and exceptions
 */
.macro _frame ref
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,SS+8-\ref
	/*CFI_REL_OFFSET ss,SS-\ref*/
	CFI_REL_OFFSET rsp,RSP-\ref
	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
	/*CFI_REL_OFFSET cs,CS-\ref*/
	CFI_REL_OFFSET rip,RIP-\ref
.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */
/* 0(%rsp): interrupt number */
.macro interrupt func
	cld
	SAVE_ARGS
	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
	pushq %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp, 0
	movq %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	testl $3,CS(%rdi)
	je 1f
	SWAPGS
	/* irqcount is used to check if a CPU is already on an interrupt
	   stack or not. While this is essentially redundant with preempt_count
	   it is a little cheaper to use a separate counter in the PDA
	   (short of moving irq_enter into assembly, which would be too
	   much work) */
1:	incl %gs:pda_irqcount
	cmoveq %gs:pda_irqstackptr,%rsp
	push %rbp			# backlink for old unwinder
	/*
	 * We entered an interrupt context - irqs are off:
	 */
	TRACE_IRQS_OFF
	call \func
.endm

ENTRY(common_interrupt)
	XCPT_FRAME
	interrupt do_IRQ
	/* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl %gs:pda_irqcount
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
exit_intr:
	GET_THREAD_INFO(%rcx)
	testl $3,CS-ARGOFFSET(%rsp)
	je retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack, but a partial stack frame
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl $_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl threadinfo_flags(%rcx),%edx
	andl %edi,%edx
	CFI_REMEMBER_STATE
	jnz retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp restore_args

retint_restore_args:	/* return to kernel space */
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_ARGS 0,8,0

irq_return:
	INTERRUPT_RETURN

	.section __ex_table, "a"
	.quad irq_return, bad_iret
	.previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iretq

	.section __ex_table,"a"
	.quad native_iret, bad_iret
	.previous
#endif

	.section .fixup,"ax"
bad_iret:
	/*
	 * The iret traps when the %cs or %ss being restored is bogus.
	 * We've lost the original trap vector and error code.
	 * #GPF is the most likely one to get for an invalid selector.
	 * So pretend we completed the iret and took the #GPF in user mode.
	 *
	 * We are now running with the kernel GS after exception recovery.
	 * But error_entry expects us to have user GS to match the user %cs,
	 * so swap back.
	 */
	pushq $0

	SWAPGS
	jmp general_protection

	.previous

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt $TIF_NEED_RESCHED,%edx
	jnc retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq %rdi
	CFI_ADJUST_CFA_OFFSET 8
	call schedule
	popq %rdi
	CFI_ADJUST_CFA_OFFSET -8
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_REST
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movl $_TIF_NEED_RESCHED,%edi
	GET_THREAD_INFO(%rcx)
	jmp retint_check

#ifdef CONFIG_PREEMPT
	/* Returning to kernel space. Check if we need preemption */
	/* rcx:	threadinfo. interrupts off. */
ENTRY(retint_kernel)
	cmpl $0,threadinfo_preempt_count(%rcx)
	jnz retint_restore_args
	bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
	jnc retint_restore_args
	bt $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
	jnc retint_restore_args
	call preempt_schedule_irq
	jmp exit_intr
#endif

	CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
.macro apicinterrupt num,func
	INTR_FRAME
	pushq $~(\num)
	CFI_ADJUST_CFA_OFFSET 8
	interrupt \func
	jmp ret_from_intr
	CFI_ENDPROC
.endm

ENTRY(thermal_interrupt)
	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

	.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
	.endm

	INVALIDATE_ENTRY 0
	INVALIDATE_ENTRY 1
	INVALIDATE_ENTRY 2
	INVALIDATE_ENTRY 3
	INVALIDATE_ENTRY 4
	INVALIDATE_ENTRY 5
	INVALIDATE_ENTRY 6
	INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)
ENTRY(irq_move_cleanup_interrupt)
	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
.macro zeroentry sym
	INTR_FRAME
	pushq $0		/* push error code/oldrax */
	CFI_ADJUST_CFA_OFFSET 8
	pushq %rax		/* push real oldrax to the rdi slot */
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
.endm

.macro errorentry sym
	XCPT_FRAME
	pushq %rax
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rax,0
	leaq \sym(%rip),%rax
	jmp error_entry
	CFI_ENDPROC
.endm

	/* error code is on the stack already */
	/* handle NMI like exceptions that can happen everywhere */
.macro paranoidentry sym, ist=0, irqtrace=1
	SAVE_ALL
	cld
	movl $1,%ebx
	movl $MSR_GS_BASE,%ecx
	rdmsr
	testl %edx,%edx
	js 1f
	SWAPGS
	xorl %ebx,%ebx
1:
	.if \ist
	movq %gs:pda_data_offset, %rbp
	.endif
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi
	movq $-1,ORIG_RAX(%rsp)
	.if \ist
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	call \sym
	.if \ist
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
	.endif
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \irqtrace
	TRACE_IRQS_OFF
	.endif
.endm

/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags at once, atomically)
 */
.macro paranoidexit trace=1
	/* ebx:	no swapgs flag */
paranoid_exit\trace:
	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore\trace
	testl $3,CS(%rsp)
	jnz paranoid_userspace\trace
paranoid_swapgs\trace:
	.if \trace
	TRACE_IRQS_IRETQ 0
	.endif
	SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
	RESTORE_ALL 8
	jmp irq_return
paranoid_userspace\trace:
	GET_THREAD_INFO(%rcx)
	movl threadinfo_flags(%rcx),%ebx
	andl $_TIF_WORK_MASK,%ebx
	jz paranoid_swapgs\trace
	movq %rsp,%rdi			/* &pt_regs */
	call sync_regs
	movq %rax,%rsp			/* switch stack for scheduling */
	testl $_TIF_NEED_RESCHED,%ebx
	jnz paranoid_schedule\trace
	movl %ebx,%edx			/* arg3: thread flags */
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_NONE)
	xorl %esi,%esi			/* arg2: oldset */
	movq %rsp,%rdi			/* arg1: &pt_regs */
	call do_notify_resume
	DISABLE_INTERRUPTS(CLBR_NONE)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
paranoid_schedule\trace:
	.if \trace
	TRACE_IRQS_ON
	.endif
	ENABLE_INTERRUPTS(CLBR_ANY)
	call schedule
	DISABLE_INTERRUPTS(CLBR_ANY)
	.if \trace
	TRACE_IRQS_OFF
	.endif
	jmp paranoid_userspace\trace
	CFI_ENDPROC
.endm

/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
	_frame RDI
	CFI_REL_OFFSET rax,0
	/* rdi slot contains rax, oldrax contains error code */
	cld
	subq $14*8,%rsp
	CFI_ADJUST_CFA_OFFSET (14*8)
	movq %rsi,13*8(%rsp)
	CFI_REL_OFFSET rsi,RSI
	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
	CFI_REGISTER rax,rsi
	movq %rdx,12*8(%rsp)
	CFI_REL_OFFSET rdx,RDX
	movq %rcx,11*8(%rsp)
	CFI_REL_OFFSET rcx,RCX
	movq %rsi,10*8(%rsp)	/* store rax */
	CFI_REL_OFFSET rax,RAX
	movq %r8, 9*8(%rsp)
	CFI_REL_OFFSET r8,R8
	movq %r9, 8*8(%rsp)
	CFI_REL_OFFSET r9,R9
	movq %r10,7*8(%rsp)
	CFI_REL_OFFSET r10,R10
	movq %r11,6*8(%rsp)
	CFI_REL_OFFSET r11,R11
	movq %rbx,5*8(%rsp)
	CFI_REL_OFFSET rbx,RBX
	movq %rbp,4*8(%rsp)
	CFI_REL_OFFSET rbp,RBP
	movq %r12,3*8(%rsp)
	CFI_REL_OFFSET r12,R12
	movq %r13,2*8(%rsp)
	CFI_REL_OFFSET r13,R13
	movq %r14,1*8(%rsp)
	CFI_REL_OFFSET r14,R14
	movq %r15,(%rsp)
	CFI_REL_OFFSET r15,R15
	xorl %ebx,%ebx
	testl $3,CS(%rsp)
	je error_kernelspace
error_swapgs:
	SWAPGS
error_sti:
	movq %rdi,RDI(%rsp)
	CFI_REL_OFFSET rdi,RDI
	movq %rsp,%rdi
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)
	call *%rax
	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
	movl %ebx,%eax
	RESTORE_REST
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	testl %eax,%eax
	jne retint_kernel
	LOCKDEP_SYS_EXIT_IRQ
	movl threadinfo_flags(%rcx),%edx
	movl $_TIF_WORK_MASK,%edi
	andl %edi,%edx
	jnz retint_careful
	jmp retint_swapgs
	CFI_ENDPROC
error_kernelspace:
	incl %ebx
	/* There are two places in the kernel that can potentially fault with
	   usergs. Handle them here. The exception handlers after
	   iret run with kernel gs again, so don't set the user space flag.
	   B stepping K8s sometimes report a truncated RIP for IRET
	   exceptions returning to compat mode. Check for these here too. */
	leaq irq_return(%rip),%rbp
	cmpq %rbp,RIP(%rsp)
	je error_swapgs
	movl %ebp,%ebp		/* zero extend */
	cmpq %rbp,RIP(%rsp)
	je error_swapgs
	cmpq $gs_change,RIP(%rsp)
	je error_swapgs
	jmp error_sti
KPROBE_END(error_entry)
	/* Reload gs selector with exception handling */
	/* edi:	new selector */
ENTRY(load_gs_index)
	CFI_STARTPROC
	pushf
	CFI_ADJUST_CFA_OFFSET 8
	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence			/* workaround */
	SWAPGS
	popf
	CFI_ADJUST_CFA_OFFSET -8
	ret
	CFI_ENDPROC
ENDPROC(load_gs_index)

	.section __ex_table,"a"
	.align 8
	.quad gs_change,bad_gs
	.previous
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp 2b
	.previous

/*
 * Create a kernel thread.
 *
 * C extern interface:
 *	extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 *
 * asm input arguments:
 *	rdi: fn, rsi: arg, rdx: flags
 */
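/*
 * Illustrative only (hypothetical caller, not part of this file): arch or
 * driver code would typically use the C interface above as
 *
 *	static int my_worker(void *unused) { ... return 0; }
 *	pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * my_worker and the flag combination here are just an example.
 */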
ENTRY(kernel_thread)
	CFI_STARTPROC
	FAKE_STACK_FRAME $child_rip
	SAVE_ALL

	# rdi: flags, rsi: usp, rdx: will be &pt_regs
	movq %rdx,%rdi
	orq kernel_thread_flags(%rip),%rdi
	movq $-1, %rsi
	movq %rsp, %rdx

	xorl %r8d,%r8d
	xorl %r9d,%r9d

	# clone now
	call do_fork
	movq %rax,RAX(%rsp)
	xorl %edi,%edi

	/*
	 * It isn't worth checking for a reschedule here,
	 * so internally to the x86_64 port you can rely on kernel_thread()
	 * not to reschedule the child before returning; this avoids the need
	 * for hacks, for example to fork off the per-CPU idle tasks.
	 * [Hopefully no generic code relies on the reschedule -AK]
	 */
	RESTORE_ALL
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
	pushq $0		# fake return address
	CFI_STARTPROC
	/*
	 * Here we are in the child and the registers are set as they were
	 * at kernel_thread() invocation in the parent.
	 */
	movq %rdi, %rax
	movq %rsi, %rdi
	call *%rax
	# exit
	mov %eax, %edi
	call do_exit
	CFI_ENDPROC
ENDPROC(child_rip)

/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *	extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *	rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *	extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *	rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
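/*
 * Illustrative only: the typical in-kernel user of this path is early boot
 * code starting user space, roughly
 *
 *	kernel_execve("/sbin/init", argv_init, envp_init);
 *
 * A negative return value means the exec failed and the caller keeps
 * running; the path name and argument vectors here are just an example.
 */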
ENTRY(kernel_execve)
	CFI_STARTPROC
	FAKE_STACK_FRAME $0
	SAVE_ALL
	movq %rsp,%rcx
	call sys_execve
	movq %rax, RAX(%rsp)
	RESTORE_REST
	testq %rax,%rax
	je int_ret_from_sys_call
	RESTORE_ARGS
	UNFAKE_STACK_FRAME
	ret
	CFI_ENDPROC
ENDPROC(kernel_execve)

KPROBE_ENTRY(page_fault)
	errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
	zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
	zeroentry math_state_restore
END(device_not_available)

	/* runs on exception stack */
KPROBE_ENTRY(debug)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_debug, DEBUG_STACK
	paranoidexit
KPROBE_END(debug)

	/* runs on exception stack */
KPROBE_ENTRY(nmi)
	INTR_FRAME
	pushq $-1
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
	paranoidexit 0
#else
	jmp paranoid_exit1
	CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_int3, DEBUG_STACK
	jmp paranoid_exit1
	CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
	zeroentry do_overflow
END(overflow)

ENTRY(bounds)
	zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
	zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
	zeroentry do_reserved
END(reserved)

	/* runs on exception stack */
ENTRY(double_fault)
	XCPT_FRAME
	paranoidentry do_double_fault
	jmp paranoid_exit1
	CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
	errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
	errorentry do_segment_not_present
END(segment_not_present)

	/* runs on exception stack */
ENTRY(stack_segment)
	XCPT_FRAME
	paranoidentry do_stack_segment
	jmp paranoid_exit1
	CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
	errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
	errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
	zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
	zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)

#ifdef CONFIG_X86_MCE
	/* runs on exception stack */
ENTRY(machine_check)
	INTR_FRAME
	pushq $0
	CFI_ADJUST_CFA_OFFSET 8
	paranoidentry do_machine_check
	jmp paranoid_exit1
	CFI_ENDPROC
END(machine_check)
#endif

	/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
	CFI_STARTPROC
	push %rbp
	CFI_ADJUST_CFA_OFFSET 8
	CFI_REL_OFFSET rbp,0
	mov %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl %gs:pda_irqcount
	cmove %gs:pda_irqstackptr,%rsp
	push %rbp		# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_DEF_CFA_REGISTER rsp
	CFI_ADJUST_CFA_OFFSET -8
	decl %gs:pda_irqcount
	ret
	CFI_ENDPROC
ENDPROC(call_softirq)

KPROBE_ENTRY(ignore_sysret)
	CFI_STARTPROC
	mov $-ENOSYS,%eax
	sysret
	CFI_ENDPROC
ENDPROC(ignore_sysret)