/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * Normal syscalls and interrupts don't save a full stack frame; this is
 * only done for syscall tracing, signals, or fork/exec et al.
 *
 * A note on terminology:
 * - top of stack: the architecture-defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 * - partial stack frame: partially saved registers up to R11.
 * - full stack frame: like the partial stack frame, but with all registers saved.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - SAVE_ALL/RESTORE_ALL - Save/restore all registers.
 * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
 *   There are unfortunately lots of special cases where some registers are
 *   not touched. The macro is a big mess that should be cleaned up.
 * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
 *   Gives a full stack frame.
 * - ENTRY/END - Define functions in the symbol table.
 * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
 *   frame that is otherwise undefined after a SYSCALL.
 * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
 * - errorentry/paranoidentry/zeroentry - Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>

        .code64

#ifdef CONFIG_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE
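/*
 * mcount() is emitted by the compiler at the start of every function when
 * profiling (-pg) is enabled.  The argument-carrying registers (plus %rax,
 * which in the x86-64 ABI can hold the variadic vector-register count) are
 * saved around the tracer call so the traced function still sees its
 * arguments intact.  0x38(%rsp) is mcount's own return address, i.e. the
 * call site inside the traced function, which is passed to the tracer.
 */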
ENTRY(mcount)
        subq $0x38, %rsp
        movq %rax, (%rsp)
        movq %rcx, 8(%rsp)
        movq %rdx, 16(%rsp)
        movq %rsi, 24(%rsp)
        movq %rdi, 32(%rsp)
        movq %r8, 40(%rsp)
        movq %r9, 48(%rsp)

        movq 0x38(%rsp), %rdi

.globl mcount_call
mcount_call:
        call ftrace_stub

        movq 48(%rsp), %r9
        movq 40(%rsp), %r8
        movq 32(%rsp), %rdi
        movq 24(%rsp), %rsi
        movq 16(%rsp), %rdx
        movq 8(%rsp), %rcx
        movq (%rsp), %rax
        addq $0x38, %rsp

        retq
END(mcount)
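/*
 * With CONFIG_DYNAMIC_FTRACE the mcount call sites are rewritten at run
 * time: sites selected for tracing call ftrace_caller instead, and the
 * "ftrace_call" location below is itself patched to call the active
 * tracer (it starts out as a call to ftrace_stub, i.e. a no-op).
 */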
ENTRY(ftrace_caller)
        /* taken from glibc */
        subq $0x38, %rsp
        movq %rax, (%rsp)
        movq %rcx, 8(%rsp)
        movq %rdx, 16(%rsp)
        movq %rsi, 24(%rsp)
        movq %rdi, 32(%rsp)
        movq %r8, 40(%rsp)
        movq %r9, 48(%rsp)

        movq 0x38(%rsp), %rdi
        movq 8(%rbp), %rsi

.globl ftrace_call
ftrace_call:
        call ftrace_stub

        movq 48(%rsp), %r9
        movq 40(%rsp), %r8
        movq 32(%rsp), %rdi
        movq 24(%rsp), %rsi
        movq 16(%rsp), %rdx
        movq 8(%rsp), %rcx
        movq (%rsp), %rax
        addq $0x38, %rsp

.globl ftrace_stub
ftrace_stub:
        retq
END(ftrace_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)
        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace
.globl ftrace_stub
ftrace_stub:
        retq

trace:
        /* taken from glibc */
        subq $0x38, %rsp
        movq %rax, (%rsp)
        movq %rcx, 8(%rsp)
        movq %rdx, 16(%rsp)
        movq %rsi, 24(%rsp)
        movq %rdi, 32(%rsp)
        movq %r8, 40(%rsp)
        movq %r9, 48(%rsp)

        movq 0x38(%rsp), %rdi
        movq 8(%rbp), %rsi

        call *ftrace_trace_function

        movq 48(%rsp), %r9
        movq 40(%rsp), %r8
        movq 32(%rsp), %rdi
        movq 24(%rsp), %rsi
        movq 16(%rsp), %rdx
        movq 8(%rsp), %rcx
        movq (%rsp), %rax
        addq $0x38, %rsp

        jmp ftrace_stub
END(mcount)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FTRACE */
#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
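/*
 * Native (non-paravirtualized) tail of the SYSCALL fast path: restore the
 * user stack pointer stashed in the PDA, switch back to the user GS base
 * and return with SYSRETQ (which also restores RFLAGS from %r11, hence the
 * "irq_enable" in the name).  Paravirt guests supply their own version of
 * this operation.
 */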
ENTRY(native_irq_enable_syscall_ret)
        movq %gs:pda_oldrsp,%rsp
        swapgs
        sysretq
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
#ifdef CONFIG_TRACE_IRQFLAGS
        bt $9,EFLAGS-\offset(%rsp)      /* interrupts off? */
        jnc 1f
        TRACE_IRQS_ON
1:
#endif
.endm
/*
 * C code is not supposed to know about the undefined top of stack. Every time
 * a C function with a pt_regs argument is called from the SYSCALL-based
 * fast path, FIXUP_TOP_OF_STACK is needed.
 * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
 * manipulation.
 */

/* %rsp: at FRAMEEND */
.macro FIXUP_TOP_OF_STACK tmp
        movq %gs:pda_oldrsp,\tmp
        movq \tmp,RSP(%rsp)
        movq $__USER_DS,SS(%rsp)
        movq $__USER_CS,CS(%rsp)
        movq $-1,RCX(%rsp)
        movq R11(%rsp),\tmp             /* get eflags */
        movq \tmp,EFLAGS(%rsp)
.endm

.macro RESTORE_TOP_OF_STACK tmp,offset=0
        movq RSP-\offset(%rsp),\tmp
        movq \tmp,%gs:pda_oldrsp
        movq EFLAGS-\offset(%rsp),\tmp
        movq \tmp,R11-\offset(%rsp)
.endm
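/*
 * FAKE_STACK_FRAME builds a complete hardware-style interrupt frame
 * (ss, rsp, eflags, cs, rip, orig_rax) on the kernel stack so that code
 * which did not enter through an interrupt or syscall - kernel_thread()
 * and kernel_execve() below - can still go through the common pt_regs
 * based paths and exit via the normal return code.
 */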
.macro FAKE_STACK_FRAME child_rip
        /* push in order ss, rsp, eflags, cs, rip */
        xorl %eax, %eax
        pushq %rax                      /* ss */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET ss,0*/
        pushq %rax                      /* rsp */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rsp,0
        pushq $(1<<9)                   /* eflags - interrupts on */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET rflags,0*/
        pushq $__KERNEL_CS              /* cs */
        CFI_ADJUST_CFA_OFFSET 8
        /*CFI_REL_OFFSET cs,0*/
        pushq \child_rip                /* rip */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip,0
        pushq %rax                      /* orig rax */
        CFI_ADJUST_CFA_OFFSET 8
.endm

.macro UNFAKE_STACK_FRAME
        addq $8*6, %rsp
        CFI_ADJUST_CFA_OFFSET -(6*8)
.endm

.macro CFI_DEFAULT_STACK start=1
        .if \start
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8
        .else
        CFI_DEF_CFA_OFFSET SS+8
        .endif
        CFI_REL_OFFSET r15,R15
        CFI_REL_OFFSET r14,R14
        CFI_REL_OFFSET r13,R13
        CFI_REL_OFFSET r12,R12
        CFI_REL_OFFSET rbp,RBP
        CFI_REL_OFFSET rbx,RBX
        CFI_REL_OFFSET r11,R11
        CFI_REL_OFFSET r10,R10
        CFI_REL_OFFSET r9,R9
        CFI_REL_OFFSET r8,R8
        CFI_REL_OFFSET rax,RAX
        CFI_REL_OFFSET rcx,RCX
        CFI_REL_OFFSET rdx,RDX
        CFI_REL_OFFSET rsi,RSI
        CFI_REL_OFFSET rdi,RDI
        CFI_REL_OFFSET rip,RIP
        /*CFI_REL_OFFSET cs,CS*/
        /*CFI_REL_OFFSET rflags,EFLAGS*/
        CFI_REL_OFFSET rsp,RSP
        /*CFI_REL_OFFSET ss,SS*/
.endm
/*
 * A newly forked process directly context switches into this.
 */
/* rdi: prev */
ENTRY(ret_from_fork)
        CFI_DEFAULT_STACK
        push kernel_eflags(%rip)
        CFI_ADJUST_CFA_OFFSET 4
        popf                            # reset kernel eflags
        CFI_ADJUST_CFA_OFFSET -4
        call schedule_tail
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
        jnz rff_trace
rff_action:
        RESTORE_REST
        testl $3,CS-ARGOFFSET(%rsp)     # from kernel_thread?
        je int_ret_from_sys_call
        testl $_TIF_IA32,threadinfo_flags(%rcx)
        jnz int_ret_from_sys_call
        RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
        jmp ret_from_sys_call
rff_trace:
        movq %rsp,%rdi
        call syscall_trace_leave
        GET_THREAD_INFO(%rcx)
        jmp rff_action
        CFI_ENDPROC
END(ret_from_fork)
/*
 * System call entry. Up to 6 arguments in registers are supported.
 *
 * SYSCALL does not save anything on the stack and does not change the
 * stack pointer.
 */

/*
 * Register setup:
 * rax  system call number
 * rdi  arg0
 * rcx  return address for syscall/sysret, C arg3
 * rsi  arg1
 * rdx  arg2
 * r10  arg3    (--> moved to rcx for C)
 * r8   arg4
 * r9   arg5
 * r11  eflags for syscall/sysret, temporary for C
 * r12-r15, rbp, rbx: saved by C code, not touched.
 *
 * Interrupts are off on entry.
 * Only called from user space.
 *
 * XXX  if we had a free scratch register we could save the RSP into the stack frame
 *      and report it properly in ps. Unfortunately we don't.
 *
 * When the user can change the frame, always force IRET. That is because
 * it deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
ENTRY(system_call)
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,PDA_STACKOFFSET
        CFI_REGISTER rip,rcx
        /*CFI_REGISTER rflags,r11*/
        SWAPGS_UNSAFE_STACK
        /*
         * A hypervisor implementation might want to use a label
         * after the swapgs, so that it can do the swapgs
         * for the guest and jump here on syscall.
         */
ENTRY(system_call_after_swapgs)

        movq %rsp,%gs:pda_oldrsp
        movq %gs:pda_kernelstack,%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
         */
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_ARGS 8,1
        movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
        movq %rcx,RIP-ARGOFFSET(%rsp)
        CFI_REL_OFFSET rip,RIP-ARGOFFSET
        GET_THREAD_INFO(%rcx)
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
        jnz tracesys
        cmpq $__NR_syscall_max,%rax
        ja badsys
        movq %r10,%rcx
        call *sys_call_table(,%rax,8)   # XXX: rip relative
        movq %rax,RAX-ARGOFFSET(%rsp)
/*
 * Syscall return path ending with SYSRET (fast path)
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: flagmask */
sysret_check:
        LOCKDEP_SYS_EXIT
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        jnz sysret_careful
        CFI_REMEMBER_STATE
        /*
         * sysretq will re-enable interrupts:
         */
        TRACE_IRQS_ON
        movq RIP-ARGOFFSET(%rsp),%rcx
        CFI_REGISTER rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER rflags,r11*/
        ENABLE_INTERRUPTS_SYSCALL_RET

        CFI_RESTORE_STATE
        /* Handle reschedules */
        /* edx: work, edi: workmask */
sysret_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc sysret_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        jmp sysret_check

        /* Handle a signal */
sysret_signal:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz 1f

        /* Really a signal */
        /* edx: work flags (arg3) */
        leaq do_notify_resume(%rip),%rax
        leaq -ARGOFFSET(%rsp),%rdi      # &pt_regs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        call ptregscall_common
1:      movl $_TIF_NEED_RESCHED,%edi
        /* Use IRET because user could have changed frame. This
           works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check

badsys:
        movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
        jmp ret_from_sys_call
        /* Do syscall tracing */
tracesys:
        SAVE_REST
        movq $-ENOSYS,RAX(%rsp)         /* ptrace can change this for a bad syscall */
        FIXUP_TOP_OF_STACK %rdi
        movq %rsp,%rdi
        call syscall_trace_enter
        LOAD_ARGS ARGOFFSET             /* reload args from stack in case ptrace changed it */
        RESTORE_REST
        cmpq $__NR_syscall_max,%rax
        ja int_ret_from_sys_call        /* RAX(%rsp) set to -ENOSYS above */
        movq %r10,%rcx                  /* fixup for C */
        call *sys_call_table(,%rax,8)
        movq %rax,RAX-ARGOFFSET(%rsp)
        /* Use IRET because user could have changed frame */

/*
 * Syscall return path ending with IRET.
 * Has correct top of stack, but partial stack frame.
 */
        .globl int_ret_from_sys_call
int_ret_from_sys_call:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
int_with_check:
        LOCKDEP_SYS_EXIT_IRQ
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        jnz int_careful
        andl $~TS_COMPAT,threadinfo_status(%rcx)
        jmp retint_swapgs

        /* Either reschedule or signal or syscall exit tracking needed. */
        /* First do a reschedule test. */
        /* edx: work, edi: workmask */
int_careful:
        bt $TIF_NEED_RESCHED,%edx
        jnc int_very_careful
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check

        /* handle signals and tracing -- both require a full stack frame */
int_very_careful:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_REST
        /* Check for syscall exit trace */
        testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
        jz int_signal
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        leaq 8(%rsp),%rdi               # &ptregs -> arg1
        call syscall_trace_leave
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
        jmp int_restore_rest

int_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz 1f
        movq %rsp,%rdi                  # &ptregs -> arg1
        xorl %esi,%esi                  # oldset -> arg2
        call do_notify_resume
1:      movl $_TIF_NEED_RESCHED,%edi
int_restore_rest:
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp int_with_check
        CFI_ENDPROC
END(system_call)
/*
 * Certain special system calls need to save a complete full stack frame.
 */
.macro PTREGSCALL label,func,arg
        .globl \label
\label:
        leaq \func(%rip),%rax
        leaq -ARGOFFSET+8(%rsp),\arg    /* 8 for return address */
        jmp ptregscall_common
END(\label)
.endm

        CFI_STARTPROC

        PTREGSCALL stub_clone, sys_clone, %r8
        PTREGSCALL stub_fork, sys_fork, %rdi
        PTREGSCALL stub_vfork, sys_vfork, %rdi
        PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
        PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
        PTREGSCALL stub_iopl, sys_iopl, %rsi
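/*
 * Common tail for the PTREGSCALL stubs above: pop the return address into
 * %r11, save the remaining registers to complete the pt_regs frame, fix up
 * the top of stack left undefined by SYSCALL, call the handler whose
 * address the stub loaded into %rax, then undo all of it on the way out.
 */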
ENTRY(ptregscall_common)
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        movq %r11, %r15
        CFI_REGISTER rip, r15
        FIXUP_TOP_OF_STACK %r11
        call *%rax
        RESTORE_TOP_OF_STACK %r11
        movq %r15, %r11
        CFI_REGISTER rip, r11
        RESTORE_REST
        pushq %r11
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rip, 0
        ret
        CFI_ENDPROC
END(ptregscall_common)

ENTRY(stub_execve)
        CFI_STARTPROC
        popq %r11
        CFI_ADJUST_CFA_OFFSET -8
        CFI_REGISTER rip, r11
        SAVE_REST
        FIXUP_TOP_OF_STACK %r11
        movq %rsp, %rcx
        call sys_execve
        RESTORE_TOP_OF_STACK %r11
        movq %rax,RAX(%rsp)
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_execve)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
        CFI_STARTPROC
        addq $8, %rsp
        CFI_ADJUST_CFA_OFFSET -8
        SAVE_REST
        movq %rsp,%rdi
        FIXUP_TOP_OF_STACK %r11
        call sys_rt_sigreturn
        movq %rax,RAX(%rsp)             # fixme, this could be done at the higher layer
        RESTORE_REST
        jmp int_ret_from_sys_call
        CFI_ENDPROC
END(stub_rt_sigreturn)
/*
 * initial frame state for interrupts and exceptions
 */
.macro _frame ref
        CFI_STARTPROC simple
        CFI_SIGNAL_FRAME
        CFI_DEF_CFA rsp,SS+8-\ref
        /*CFI_REL_OFFSET ss,SS-\ref*/
        CFI_REL_OFFSET rsp,RSP-\ref
        /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
        /*CFI_REL_OFFSET cs,CS-\ref*/
        CFI_REL_OFFSET rip,RIP-\ref
.endm

/* initial frame state for interrupts (and exceptions without error code) */
#define INTR_FRAME _frame RIP
/* initial frame state for exceptions with error code (and interrupts with
   vector already pushed) */
#define XCPT_FRAME _frame ORIG_RAX

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */
/* 0(%rsp): interrupt number */
.macro interrupt func
        cld
        SAVE_ARGS
        leaq -ARGOFFSET(%rsp),%rdi      # arg1 for handler
        pushq %rbp
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp, 0
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        testl $3,CS(%rdi)
        je 1f
        SWAPGS
        /* irqcount is used to check if a CPU is already on an interrupt
           stack or not. While this is essentially redundant with preempt_count
           it is a little cheaper to use a separate counter in the PDA
           (short of moving irq_enter into assembly, which would be too
           much work) */
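        /* pda_irqcount starts at -1; the incl below sets ZF only when this
           is the outermost interrupt, and only in that case does the cmoveq
           switch %rsp over to the per-CPU interrupt stack. */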
1:      incl %gs:pda_irqcount
        cmoveq %gs:pda_irqstackptr,%rsp
        push %rbp                       # backlink for old unwinder
        /*
         * We entered an interrupt context - irqs are off:
         */
        TRACE_IRQS_OFF
        call \func
.endm
ENTRY(common_interrupt)
        XCPT_FRAME
        interrupt do_IRQ
        /* 0(%rsp): oldrsp-ARGOFFSET */
ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        decl %gs:pda_irqcount
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
exit_intr:
        GET_THREAD_INFO(%rcx)
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_kernel

        /* Interrupt came from user space */
        /*
         * Has a correct top of stack, but a partial stack frame
         * %rcx: thread info. Interrupts off.
         */
retint_with_reschedule:
        movl $_TIF_WORK_MASK,%edi
retint_check:
        LOCKDEP_SYS_EXIT_IRQ
        movl threadinfo_flags(%rcx),%edx
        andl %edi,%edx
        CFI_REMEMBER_STATE
        jnz retint_careful

retint_swapgs:          /* return to user-space */
        /*
         * The iretq could re-enable interrupts:
         */
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_IRETQ
        SWAPGS
        jmp restore_args

retint_restore_args:    /* return to kernel space */
        DISABLE_INTERRUPTS(CLBR_ANY)
        /*
         * The iretq could re-enable interrupts:
         */
        TRACE_IRQS_IRETQ
restore_args:
        RESTORE_ARGS 0,8,0

irq_return:
        INTERRUPT_RETURN
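        /* If the iretq at irq_return faults (e.g. on a bogus %cs or %ss),
           the exception table entry below redirects the fault to the
           bad_iret fixup in the .fixup section. */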
        .section __ex_table, "a"
        .quad irq_return, bad_iret
        .previous

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
        iretq

        .section __ex_table,"a"
        .quad native_iret, bad_iret
        .previous
#endif

        .section .fixup,"ax"
bad_iret:
        /*
         * The iret traps when the %cs or %ss being restored is bogus.
         * We've lost the original trap vector and error code.
         * #GPF is the most likely one to get for an invalid selector.
         * So pretend we completed the iret and took the #GPF in user mode.
         *
         * We are now running with the kernel GS after exception recovery.
         * But error_entry expects us to have user GS to match the user %cs,
         * so swap back.
         */
        pushq $0
        SWAPGS
        jmp general_protection
        .previous
        /* edi: workmask, edx: work */
retint_careful:
        CFI_RESTORE_STATE
        bt $TIF_NEED_RESCHED,%edx
        jnc retint_signal
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        pushq %rdi
        CFI_ADJUST_CFA_OFFSET 8
        call schedule
        popq %rdi
        CFI_ADJUST_CFA_OFFSET -8
        GET_THREAD_INFO(%rcx)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        jmp retint_check

retint_signal:
        testl $_TIF_DO_NOTIFY_MASK,%edx
        jz retint_swapgs
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        SAVE_REST
        movq $-1,ORIG_RAX(%rsp)
        xorl %esi,%esi                  # oldset
        movq %rsp,%rdi                  # &pt_regs
        call do_notify_resume
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        movl $_TIF_NEED_RESCHED,%edi
        GET_THREAD_INFO(%rcx)
        jmp retint_check

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption */
        /* rcx: threadinfo. interrupts off. */
ENTRY(retint_kernel)
        cmpl $0,threadinfo_preempt_count(%rcx)
        jnz retint_restore_args
        bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
        jnc retint_restore_args
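        /* IF is bit 9 of the saved RFLAGS: only preempt if the interrupted
           kernel context was running with interrupts enabled. */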
        bt $9,EFLAGS-ARGOFFSET(%rsp)    /* interrupts off? */
        jnc retint_restore_args
        call preempt_schedule_irq
        jmp exit_intr
#endif

        CFI_ENDPROC
END(common_interrupt)
/*
 * APIC interrupts.
 */
.macro apicinterrupt num,func
        INTR_FRAME
        pushq $~(\num)
        CFI_ADJUST_CFA_OFFSET 8
        interrupt \func
        jmp ret_from_intr
        CFI_ENDPROC
.endm

ENTRY(thermal_interrupt)
        apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
END(thermal_interrupt)

ENTRY(threshold_interrupt)
        apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
END(threshold_interrupt)

#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
        apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
END(reschedule_interrupt)

.macro INVALIDATE_ENTRY num
ENTRY(invalidate_interrupt\num)
        apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
END(invalidate_interrupt\num)
.endm

        INVALIDATE_ENTRY 0
        INVALIDATE_ENTRY 1
        INVALIDATE_ENTRY 2
        INVALIDATE_ENTRY 3
        INVALIDATE_ENTRY 4
        INVALIDATE_ENTRY 5
        INVALIDATE_ENTRY 6
        INVALIDATE_ENTRY 7

ENTRY(call_function_interrupt)
        apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
END(call_function_interrupt)

ENTRY(irq_move_cleanup_interrupt)
        apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
END(irq_move_cleanup_interrupt)
#endif

ENTRY(apic_timer_interrupt)
        apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
END(apic_timer_interrupt)

ENTRY(error_interrupt)
        apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
END(error_interrupt)

ENTRY(spurious_interrupt)
        apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
END(spurious_interrupt)
/*
 * Exception entry points.
 */
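/*
 * zeroentry is for exceptions where the CPU does not push an error code:
 * it pushes a 0 in its place.  errorentry relies on the error code the
 * hardware already pushed.  Both stash the real %rax in the rdi slot,
 * load the C handler's address into %rax and jump to the common
 * error_entry path below.
 */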
.macro zeroentry sym
        INTR_FRAME
        pushq $0                        /* push error code/oldrax */
        CFI_ADJUST_CFA_OFFSET 8
        pushq %rax                      /* push real oldrax to the rdi slot */
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rax,0
        leaq \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
.endm

.macro errorentry sym
        XCPT_FRAME
        pushq %rax
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rax,0
        leaq \sym(%rip),%rax
        jmp error_entry
        CFI_ENDPROC
.endm

/* error code is on the stack already */
/* handle NMI-like exceptions that can happen everywhere */
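/*
 * The MSR_GS_BASE check below decides whether swapgs is needed: a negative
 * (kernel) GS base means we already have the kernel GS, e.g. because the
 * exception nested inside another kernel entry.  %ebx records the answer
 * so the paranoid exit path knows whether to swap back.
 */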
.macro paranoidentry sym, ist=0, irqtrace=1
        SAVE_ALL
        cld
        movl $1,%ebx
        movl $MSR_GS_BASE,%ecx
        rdmsr
        testl %edx,%edx
        js 1f
        SWAPGS
        xorl %ebx,%ebx
1:
        .if \ist
        movq %gs:pda_data_offset, %rbp
        .endif
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi
        movq $-1,ORIG_RAX(%rsp)
        .if \ist
        subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        call \sym
        .if \ist
        addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
        .endif
        DISABLE_INTERRUPTS(CLBR_NONE)
        .if \irqtrace
        TRACE_IRQS_OFF
        .endif
.endm
/*
 * "Paranoid" exit path from exception stack.
 * Paranoid because this is used by NMIs and cannot take
 * any kernel state for granted.
 * We don't do kernel preemption checks here, because only
 * NMI should be common and it does not enable IRQs and
 * cannot get reschedule ticks.
 *
 * "trace" is 0 for the NMI handler only, because irq-tracing
 * is fundamentally NMI-unsafe. (we cannot change the soft and
 * hard flags atomically)
 */
.macro paranoidexit trace=1
        /* ebx: no swapgs flag */
paranoid_exit\trace:
        testl %ebx,%ebx                 /* swapgs needed? */
        jnz paranoid_restore\trace
        testl $3,CS(%rsp)
        jnz paranoid_userspace\trace
paranoid_swapgs\trace:
        .if \trace
        TRACE_IRQS_IRETQ 0
        .endif
        SWAPGS_UNSAFE_STACK
paranoid_restore\trace:
        RESTORE_ALL 8
        jmp irq_return
paranoid_userspace\trace:
        GET_THREAD_INFO(%rcx)
        movl threadinfo_flags(%rcx),%ebx
        andl $_TIF_WORK_MASK,%ebx
        jz paranoid_swapgs\trace
        movq %rsp,%rdi                  /* &pt_regs */
        call sync_regs
        movq %rax,%rsp                  /* switch stack for scheduling */
        testl $_TIF_NEED_RESCHED,%ebx
        jnz paranoid_schedule\trace
        movl %ebx,%edx                  /* arg3: thread flags */
        .if \trace
        TRACE_IRQS_ON
        .endif
        ENABLE_INTERRUPTS(CLBR_NONE)
        xorl %esi,%esi                  /* arg2: oldset */
        movq %rsp,%rdi                  /* arg1: &pt_regs */
        call do_notify_resume
        DISABLE_INTERRUPTS(CLBR_NONE)
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
paranoid_schedule\trace:
        .if \trace
        TRACE_IRQS_ON
        .endif
        ENABLE_INTERRUPTS(CLBR_ANY)
        call schedule
        DISABLE_INTERRUPTS(CLBR_ANY)
        .if \trace
        TRACE_IRQS_OFF
        .endif
        jmp paranoid_userspace\trace
        CFI_ENDPROC
.endm
/*
 * Exception entry point. This expects an error code/orig_rax on the stack
 * and the exception handler in %rax.
 */
KPROBE_ENTRY(error_entry)
        _frame RDI
        CFI_REL_OFFSET rax,0
        /* rdi slot contains rax, oldrax contains error code */
        cld
        subq $14*8,%rsp
        CFI_ADJUST_CFA_OFFSET (14*8)
        movq %rsi,13*8(%rsp)
        CFI_REL_OFFSET rsi,RSI
        movq 14*8(%rsp),%rsi            /* load rax from rdi slot */
        CFI_REGISTER rax,rsi
        movq %rdx,12*8(%rsp)
        CFI_REL_OFFSET rdx,RDX
        movq %rcx,11*8(%rsp)
        CFI_REL_OFFSET rcx,RCX
        movq %rsi,10*8(%rsp)            /* store rax */
        CFI_REL_OFFSET rax,RAX
        movq %r8, 9*8(%rsp)
        CFI_REL_OFFSET r8,R8
        movq %r9, 8*8(%rsp)
        CFI_REL_OFFSET r9,R9
        movq %r10,7*8(%rsp)
        CFI_REL_OFFSET r10,R10
        movq %r11,6*8(%rsp)
        CFI_REL_OFFSET r11,R11
        movq %rbx,5*8(%rsp)
        CFI_REL_OFFSET rbx,RBX
        movq %rbp,4*8(%rsp)
        CFI_REL_OFFSET rbp,RBP
        movq %r12,3*8(%rsp)
        CFI_REL_OFFSET r12,R12
        movq %r13,2*8(%rsp)
        CFI_REL_OFFSET r13,R13
        movq %r14,1*8(%rsp)
        CFI_REL_OFFSET r14,R14
        movq %r15,(%rsp)
        CFI_REL_OFFSET r15,R15
        xorl %ebx,%ebx
        testl $3,CS(%rsp)
        je error_kernelspace
error_swapgs:
        SWAPGS
error_sti:
        movq %rdi,RDI(%rsp)
        CFI_REL_OFFSET rdi,RDI
        movq %rsp,%rdi
        movq ORIG_RAX(%rsp),%rsi        /* get error code */
        movq $-1,ORIG_RAX(%rsp)
        call *%rax
        /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
error_exit:
        movl %ebx,%eax
        RESTORE_REST
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        GET_THREAD_INFO(%rcx)
        testl %eax,%eax
        jne retint_kernel
        LOCKDEP_SYS_EXIT_IRQ
        movl threadinfo_flags(%rcx),%edx
        movl $_TIF_WORK_MASK,%edi
        andl %edi,%edx
        jnz retint_careful
        jmp retint_swapgs
        CFI_ENDPROC

error_kernelspace:
        incl %ebx
        /* There are two places in the kernel that can potentially fault with
           usergs. Handle them here. The exception handlers after iret run
           with kernel gs again, so don't set the user space flag.
           B stepping K8s sometimes report a truncated RIP for IRET
           exceptions returning to compat mode. Check for these here too. */
        leaq irq_return(%rip),%rbp
        cmpq %rbp,RIP(%rsp)
        je error_swapgs
        movl %ebp,%ebp                  /* zero extend */
        cmpq %rbp,RIP(%rsp)
        je error_swapgs
        cmpq $gs_change,RIP(%rsp)
        je error_swapgs
        jmp error_sti
KPROBE_END(error_entry)
/* Reload gs selector with exception handling */
/* edi: new selector */
ENTRY(load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
        DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
        SWAPGS
gs_change:
        movl %edi,%gs
2:      mfence                          /* workaround */
        SWAPGS
        popf
        CFI_ADJUST_CFA_OFFSET -8
        ret
        CFI_ENDPROC
ENDPROC(load_gs_index)

        .section __ex_table,"a"
        .align 8
        .quad gs_change,bad_gs
        .previous
        .section .fixup,"ax"
        /* running with kernelgs */
bad_gs:
        SWAPGS                          /* switch back to user gs */
        xorl %eax,%eax
        movl %eax,%gs
        jmp 2b
        .previous
/*
 * Create a kernel thread.
 *
 * C extern interface:
 *      extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 *
 * asm input arguments:
 *      rdi: fn, rsi: arg, rdx: flags
 */
ENTRY(kernel_thread)
        CFI_STARTPROC
        FAKE_STACK_FRAME $child_rip
        SAVE_ALL

        # rdi: flags, rsi: usp, rdx: will be &pt_regs
        movq %rdx,%rdi
        orq kernel_thread_flags(%rip),%rdi
        movq $-1, %rsi
        movq %rsp, %rdx

        xorl %r8d,%r8d
        xorl %r9d,%r9d

        # clone now
        call do_fork
        movq %rax,RAX(%rsp)
        xorl %edi,%edi

        /*
         * It isn't worth checking for a reschedule here, so internally
         * to the x86_64 port you can rely on kernel_thread() not
         * rescheduling the child before returning; this avoids the need
         * for hacks, for example to fork off the per-CPU idle tasks.
         * [Hopefully no generic code relies on the reschedule -AK]
         */
        RESTORE_ALL
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(kernel_thread)

child_rip:
        pushq $0                        # fake return address
        CFI_STARTPROC
        /*
         * Here we are in the child and the registers are set as they were
         * at kernel_thread() invocation in the parent.
         */
        movq %rdi, %rax
        movq %rsi, %rdi
        call *%rax
        # exit
        mov %eax, %edi
        call do_exit
        CFI_ENDPROC
ENDPROC(child_rip)
/*
 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
 *
 * C extern interface:
 *      extern long execve(char *name, char **argv, char **envp)
 *
 * asm input arguments:
 *      rdi: name, rsi: argv, rdx: envp
 *
 * We want to fall back into:
 *      extern long sys_execve(char *name, char **argv, char **envp, struct pt_regs *regs)
 *
 * do_sys_execve asm fallback arguments:
 *      rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
 */
ENTRY(kernel_execve)
        CFI_STARTPROC
        FAKE_STACK_FRAME $0
        SAVE_ALL
        movq %rsp,%rcx
        call sys_execve
        movq %rax, RAX(%rsp)
        RESTORE_REST
        testq %rax,%rax
        je int_ret_from_sys_call
        RESTORE_ARGS
        UNFAKE_STACK_FRAME
        ret
        CFI_ENDPROC
ENDPROC(kernel_execve)
KPROBE_ENTRY(page_fault)
        errorentry do_page_fault
KPROBE_END(page_fault)

ENTRY(coprocessor_error)
        zeroentry do_coprocessor_error
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
        zeroentry do_simd_coprocessor_error
END(simd_coprocessor_error)

ENTRY(device_not_available)
        zeroentry math_state_restore
END(device_not_available)

        /* runs on exception stack */
KPROBE_ENTRY(debug)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_debug, DEBUG_STACK
        paranoidexit
KPROBE_END(debug)

        /* runs on exception stack */
KPROBE_ENTRY(nmi)
        INTR_FRAME
        pushq $-1
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_nmi, 0, 0
#ifdef CONFIG_TRACE_IRQFLAGS
        paranoidexit 0
#else
        jmp paranoid_exit1
        CFI_ENDPROC
#endif
KPROBE_END(nmi)

KPROBE_ENTRY(int3)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_int3, DEBUG_STACK
        jmp paranoid_exit1
        CFI_ENDPROC
KPROBE_END(int3)

ENTRY(overflow)
        zeroentry do_overflow
END(overflow)

ENTRY(bounds)
        zeroentry do_bounds
END(bounds)

ENTRY(invalid_op)
        zeroentry do_invalid_op
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
        zeroentry do_coprocessor_segment_overrun
END(coprocessor_segment_overrun)

ENTRY(reserved)
        zeroentry do_reserved
END(reserved)

        /* runs on exception stack */
ENTRY(double_fault)
        XCPT_FRAME
        paranoidentry do_double_fault
        jmp paranoid_exit1
        CFI_ENDPROC
END(double_fault)

ENTRY(invalid_TSS)
        errorentry do_invalid_TSS
END(invalid_TSS)

ENTRY(segment_not_present)
        errorentry do_segment_not_present
END(segment_not_present)

        /* runs on exception stack */
ENTRY(stack_segment)
        XCPT_FRAME
        paranoidentry do_stack_segment
        jmp paranoid_exit1
        CFI_ENDPROC
END(stack_segment)

KPROBE_ENTRY(general_protection)
        errorentry do_general_protection
KPROBE_END(general_protection)

ENTRY(alignment_check)
        errorentry do_alignment_check
END(alignment_check)

ENTRY(divide_error)
        zeroentry do_divide_error
END(divide_error)

ENTRY(spurious_interrupt_bug)
        zeroentry do_spurious_interrupt_bug
END(spurious_interrupt_bug)
#ifdef CONFIG_X86_MCE
        /* runs on exception stack */
ENTRY(machine_check)
        INTR_FRAME
        pushq $0
        CFI_ADJUST_CFA_OFFSET 8
        paranoidentry do_machine_check
        jmp paranoid_exit1
        CFI_ENDPROC
END(machine_check)
#endif

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
        CFI_STARTPROC
        push %rbp
        CFI_ADJUST_CFA_OFFSET 8
        CFI_REL_OFFSET rbp,0
        mov %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
        incl %gs:pda_irqcount
        cmove %gs:pda_irqstackptr,%rsp
        push %rbp                       # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER rsp
        CFI_ADJUST_CFA_OFFSET -8
        decl %gs:pda_irqcount
        ret
        CFI_ENDPROC
ENDPROC(call_softirq)
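/*
 * ignore_sysret simply fails the system call with -ENOSYS and returns.
 * It is presumably installed as a stub SYSCALL target (e.g. for the compat
 * SYSCALL MSR on CPUs where that entry path is not supported).
 */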
KPROBE_ENTRY(ignore_sysret)
        CFI_STARTPROC
        mov $-ENOSYS,%eax
        sysret
        CFI_ENDPROC
ENDPROC(ignore_sysret)