  1. /*
  2. * linux/arch/x86_64/entry.S
  3. *
  4. * Copyright (C) 1991, 1992 Linus Torvalds
  5. * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
  6. * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
  7. *
  8. * $Id$
  9. */
  10. /*
  11. * entry.S contains the system-call and fault low-level handling routines.
  12. *
  13. * NOTE: This code handles signal recognition, which happens after every
  14. * interrupt and after each system call.
  15. *
  16. * Normal syscalls and interrupts don't save a full stack frame; this is
  17. * only done for syscall tracing, signals, or fork/exec et al.
  18. *
  19. * A note on terminology:
  20. * - top of stack: Architecture defined interrupt frame from SS to RIP
  21. * at the top of the kernel process stack.
  22. * - partial stack frame: partially saved registers up to R11.
  23. * - full stack frame: Like the partial stack frame, but with all registers saved.
  24. *
  25. * Some macro usage:
  26. * - CFI macros are used to generate dwarf2 unwind information for better
  27. * backtraces. They don't change any code.
  28. * - SAVE_ALL/RESTORE_ALL - Save/restore all registers
  29. * - SAVE_ARGS/RESTORE_ARGS - Save/restore registers that C functions modify.
  30. * There are unfortunately lots of special cases where some registers are
  31. * not touched. The macro is a big mess that should be cleaned up.
  32. * - SAVE_REST/RESTORE_REST - Handle the registers not saved by SAVE_ARGS.
  33. * Gives a full stack frame.
  34. * - ENTRY/END - Define functions in the symbol table.
  35. * - FIXUP_TOP_OF_STACK/RESTORE_TOP_OF_STACK - Fix up the hardware stack
  36. * frame that is otherwise undefined after a SYSCALL
  37. * - TRACE_IRQS_* - Trace hard interrupt state for lock debugging.
  38. * - errorentry/paranoidentry/zeroentry - Define exception entry points.
  39. */
  40. #include <linux/linkage.h>
  41. #include <asm/segment.h>
  42. #include <asm/cache.h>
  43. #include <asm/errno.h>
  44. #include <asm/dwarf2.h>
  45. #include <asm/calling.h>
  46. #include <asm/asm-offsets.h>
  47. #include <asm/msr.h>
  48. #include <asm/unistd.h>
  49. #include <asm/thread_info.h>
  50. #include <asm/hw_irq.h>
  51. #include <asm/page.h>
  52. #include <asm/irqflags.h>
  53. .code64
  54. #ifndef CONFIG_PREEMPT
  55. #define retint_kernel retint_restore_args
  56. #endif
  57. .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
  58. #ifdef CONFIG_TRACE_IRQFLAGS
  59. bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
  60. jnc 1f
  61. TRACE_IRQS_ON
  62. 1:
  63. #endif
  64. .endm
  65. /*
  66. * C code is not supposed to know about the undefined top of stack. Every time
  67. * a C function with a pt_regs argument is called from the SYSCALL-based
  68. * fast path, FIXUP_TOP_OF_STACK is needed.
  69. * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
  70. * manipulation.
  71. */
  72. /* %rsp:at FRAMEEND */
  73. .macro FIXUP_TOP_OF_STACK tmp
  74. movq %gs:pda_oldrsp,\tmp
  75. movq \tmp,RSP(%rsp)
  76. movq $__USER_DS,SS(%rsp)
  77. movq $__USER_CS,CS(%rsp)
  78. movq $-1,RCX(%rsp)
  79. movq R11(%rsp),\tmp /* get eflags */
  80. movq \tmp,EFLAGS(%rsp)
  81. .endm
  82. .macro RESTORE_TOP_OF_STACK tmp,offset=0
  83. movq RSP-\offset(%rsp),\tmp
  84. movq \tmp,%gs:pda_oldrsp
  85. movq EFLAGS-\offset(%rsp),\tmp
  86. movq \tmp,R11-\offset(%rsp)
  87. .endm
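/*
 * Note: SYSCALL leaves only the return RIP in %rcx and the saved eflags in
 * %r11 and pushes no iret frame, so FIXUP_TOP_OF_STACK synthesizes one:
 * user SS/CS, the user RSP saved in the PDA, and EFLAGS taken from the R11
 * slot. RCX is set to -1 because the user's %rcx was clobbered by SYSCALL
 * and is not available.
 */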
  88. .macro FAKE_STACK_FRAME child_rip
  89. /* push in order ss, rsp, eflags, cs, rip */
  90. xorl %eax, %eax
  91. pushq %rax /* ss */
  92. CFI_ADJUST_CFA_OFFSET 8
  93. /*CFI_REL_OFFSET ss,0*/
  94. pushq %rax /* rsp */
  95. CFI_ADJUST_CFA_OFFSET 8
  96. CFI_REL_OFFSET rsp,0
  97. pushq $(1<<9) /* eflags - interrupts on */
  98. CFI_ADJUST_CFA_OFFSET 8
  99. /*CFI_REL_OFFSET rflags,0*/
  100. pushq $__KERNEL_CS /* cs */
  101. CFI_ADJUST_CFA_OFFSET 8
  102. /*CFI_REL_OFFSET cs,0*/
  103. pushq \child_rip /* rip */
  104. CFI_ADJUST_CFA_OFFSET 8
  105. CFI_REL_OFFSET rip,0
  106. pushq %rax /* orig rax */
  107. CFI_ADJUST_CFA_OFFSET 8
  108. .endm
  109. .macro UNFAKE_STACK_FRAME
  110. addq $8*6, %rsp
  111. CFI_ADJUST_CFA_OFFSET -(6*8)
  112. .endm
  113. .macro CFI_DEFAULT_STACK start=1
  114. .if \start
  115. CFI_STARTPROC simple
  116. CFI_DEF_CFA rsp,SS+8
  117. .else
  118. CFI_DEF_CFA_OFFSET SS+8
  119. .endif
  120. CFI_REL_OFFSET r15,R15
  121. CFI_REL_OFFSET r14,R14
  122. CFI_REL_OFFSET r13,R13
  123. CFI_REL_OFFSET r12,R12
  124. CFI_REL_OFFSET rbp,RBP
  125. CFI_REL_OFFSET rbx,RBX
  126. CFI_REL_OFFSET r11,R11
  127. CFI_REL_OFFSET r10,R10
  128. CFI_REL_OFFSET r9,R9
  129. CFI_REL_OFFSET r8,R8
  130. CFI_REL_OFFSET rax,RAX
  131. CFI_REL_OFFSET rcx,RCX
  132. CFI_REL_OFFSET rdx,RDX
  133. CFI_REL_OFFSET rsi,RSI
  134. CFI_REL_OFFSET rdi,RDI
  135. CFI_REL_OFFSET rip,RIP
  136. /*CFI_REL_OFFSET cs,CS*/
  137. /*CFI_REL_OFFSET rflags,EFLAGS*/
  138. CFI_REL_OFFSET rsp,RSP
  139. /*CFI_REL_OFFSET ss,SS*/
  140. .endm
  141. /*
  142. * A newly forked process directly context switches into this.
  143. */
  144. /* rdi: prev */
  145. ENTRY(ret_from_fork)
  146. CFI_DEFAULT_STACK
  147. call schedule_tail
  148. GET_THREAD_INFO(%rcx)
  149. testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
  150. jnz rff_trace
  151. rff_action:
  152. RESTORE_REST
  153. testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
  154. je int_ret_from_sys_call
  155. testl $_TIF_IA32,threadinfo_flags(%rcx)
  156. jnz int_ret_from_sys_call
  157. RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
  158. jmp ret_from_sys_call
  159. rff_trace:
  160. movq %rsp,%rdi
  161. call syscall_trace_leave
  162. GET_THREAD_INFO(%rcx)
  163. jmp rff_action
  164. CFI_ENDPROC
  165. END(ret_from_fork)
  166. /*
  167. * System call entry. Up to 6 arguments in registers are supported.
  168. *
  169. * SYSCALL does not save anything on the stack and does not change the
  170. * stack pointer.
  171. */
  172. /*
  173. * Register setup:
  174. * rax system call number
  175. * rdi arg0
  176. * rcx return address for syscall/sysret, C arg3
  177. * rsi arg1
  178. * rdx arg2
  179. * r10 arg3 (--> moved to rcx for C)
  180. * r8 arg4
  181. * r9 arg5
  182. * r11 eflags for syscall/sysret, temporary for C
  183. * r12-r15,rbp,rbx saved by C code, not touched.
  184. *
  185. * Interrupts are off on entry.
  186. * Only called from user space.
  187. *
  188. * XXX if we had a free scratch register we could save the RSP into the stack frame
  189. * and report it properly in ps. Unfortunately we don't have one.
  190. *
  191. * When the user can change the frame, always force IRET. That is because
  192. * IRET deals with non-canonical addresses better. SYSRET has trouble
  193. * with them due to bugs in both AMD and Intel CPUs.
  194. */
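/*
 * Example: a user-space write(fd, buf, count) arrives here with
 * rax = __NR_write, rdi = fd, rsi = buf, rdx = count; the syscall's
 * return value is passed back to user space in rax.
 */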
  195. ENTRY(system_call)
  196. CFI_STARTPROC simple
  197. CFI_DEF_CFA rsp,PDA_STACKOFFSET
  198. CFI_REGISTER rip,rcx
  199. /*CFI_REGISTER rflags,r11*/
  200. swapgs
  201. movq %rsp,%gs:pda_oldrsp
  202. movq %gs:pda_kernelstack,%rsp
  203. /*
  204. * No need to follow this irqs off/on section - it's straight
  205. * and short:
  206. */
  207. sti
  208. SAVE_ARGS 8,1
  209. movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
  210. movq %rcx,RIP-ARGOFFSET(%rsp)
  211. CFI_REL_OFFSET rip,RIP-ARGOFFSET
  212. GET_THREAD_INFO(%rcx)
  213. testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
  214. CFI_REMEMBER_STATE
  215. jnz tracesys
  216. cmpq $__NR_syscall_max,%rax
  217. ja badsys
  218. movq %r10,%rcx
  219. call *sys_call_table(,%rax,8) # XXX: rip relative
  220. movq %rax,RAX-ARGOFFSET(%rsp)
  221. /*
  222. * Syscall return path ending with SYSRET (fast path)
  223. * Has incomplete stack frame and undefined top of stack.
  224. */
  225. .globl ret_from_sys_call
  226. ret_from_sys_call:
  227. movl $_TIF_ALLWORK_MASK,%edi
  228. /* edi: flagmask */
  229. sysret_check:
  230. GET_THREAD_INFO(%rcx)
  231. cli
  232. TRACE_IRQS_OFF
  233. movl threadinfo_flags(%rcx),%edx
  234. andl %edi,%edx
  235. CFI_REMEMBER_STATE
  236. jnz sysret_careful
  237. /*
  238. * sysretq will re-enable interrupts:
  239. */
  240. TRACE_IRQS_ON
  241. movq RIP-ARGOFFSET(%rsp),%rcx
  242. CFI_REGISTER rip,rcx
  243. RESTORE_ARGS 0,-ARG_SKIP,1
  244. /*CFI_REGISTER rflags,r11*/
  245. movq %gs:pda_oldrsp,%rsp
  246. swapgs
  247. sysretq
  248. /* Handle reschedules */
  249. /* edx: work, edi: workmask */
  250. sysret_careful:
  251. CFI_RESTORE_STATE
  252. bt $TIF_NEED_RESCHED,%edx
  253. jnc sysret_signal
  254. TRACE_IRQS_ON
  255. sti
  256. pushq %rdi
  257. CFI_ADJUST_CFA_OFFSET 8
  258. call schedule
  259. popq %rdi
  260. CFI_ADJUST_CFA_OFFSET -8
  261. jmp sysret_check
  262. /* Handle a signal */
  263. sysret_signal:
  264. TRACE_IRQS_ON
  265. sti
  266. testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
  267. jz 1f
  268. /* Really a signal */
  269. /* edx: work flags (arg3) */
  270. leaq do_notify_resume(%rip),%rax
  271. leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
  272. xorl %esi,%esi # oldset -> arg2
  273. call ptregscall_common
  274. 1: movl $_TIF_NEED_RESCHED,%edi
  275. /* Use IRET because the user could have changed the frame. This
  276. works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
  277. cli
  278. TRACE_IRQS_OFF
  279. jmp int_with_check
  280. badsys:
  281. movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
  282. jmp ret_from_sys_call
  283. /* Do syscall tracing */
  284. tracesys:
  285. CFI_RESTORE_STATE
  286. SAVE_REST
  287. movq $-ENOSYS,RAX(%rsp)
  288. FIXUP_TOP_OF_STACK %rdi
  289. movq %rsp,%rdi
  290. call syscall_trace_enter
  291. LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed them */
  292. RESTORE_REST
  293. cmpq $__NR_syscall_max,%rax
  294. ja 1f
  295. movq %r10,%rcx /* fixup for C */
  296. call *sys_call_table(,%rax,8)
  297. 1: movq %rax,RAX-ARGOFFSET(%rsp)
  298. /* Use IRET because the user could have changed the frame */
  299. jmp int_ret_from_sys_call
  300. CFI_ENDPROC
  301. END(system_call)
  302. /*
  303. * Syscall return path ending with IRET.
  304. * Has correct top of stack, but partial stack frame.
  305. */
  306. ENTRY(int_ret_from_sys_call)
  307. CFI_STARTPROC simple
  308. CFI_DEF_CFA rsp,SS+8-ARGOFFSET
  309. /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
  310. CFI_REL_OFFSET rsp,RSP-ARGOFFSET
  311. /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
  312. /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
  313. CFI_REL_OFFSET rip,RIP-ARGOFFSET
  314. CFI_REL_OFFSET rdx,RDX-ARGOFFSET
  315. CFI_REL_OFFSET rcx,RCX-ARGOFFSET
  316. CFI_REL_OFFSET rax,RAX-ARGOFFSET
  317. CFI_REL_OFFSET rdi,RDI-ARGOFFSET
  318. CFI_REL_OFFSET rsi,RSI-ARGOFFSET
  319. CFI_REL_OFFSET r8,R8-ARGOFFSET
  320. CFI_REL_OFFSET r9,R9-ARGOFFSET
  321. CFI_REL_OFFSET r10,R10-ARGOFFSET
  322. CFI_REL_OFFSET r11,R11-ARGOFFSET
  323. cli
  324. TRACE_IRQS_OFF
  325. testl $3,CS-ARGOFFSET(%rsp)
  326. je retint_restore_args
  327. movl $_TIF_ALLWORK_MASK,%edi
  328. /* edi: mask to check */
  329. int_with_check:
  330. GET_THREAD_INFO(%rcx)
  331. movl threadinfo_flags(%rcx),%edx
  332. andl %edi,%edx
  333. jnz int_careful
  334. andl $~TS_COMPAT,threadinfo_status(%rcx)
  335. jmp retint_swapgs
  336. /* Either reschedule or signal or syscall exit tracking needed. */
  337. /* First do a reschedule test. */
  338. /* edx: work, edi: workmask */
  339. int_careful:
  340. bt $TIF_NEED_RESCHED,%edx
  341. jnc int_very_careful
  342. TRACE_IRQS_ON
  343. sti
  344. pushq %rdi
  345. CFI_ADJUST_CFA_OFFSET 8
  346. call schedule
  347. popq %rdi
  348. CFI_ADJUST_CFA_OFFSET -8
  349. cli
  350. TRACE_IRQS_OFF
  351. jmp int_with_check
  352. /* handle signals and tracing -- both require a full stack frame */
  353. int_very_careful:
  354. TRACE_IRQS_ON
  355. sti
  356. SAVE_REST
  357. /* Check for syscall exit trace */
  358. testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
  359. jz int_signal
  360. pushq %rdi
  361. CFI_ADJUST_CFA_OFFSET 8
  362. leaq 8(%rsp),%rdi # &ptregs -> arg1
  363. call syscall_trace_leave
  364. popq %rdi
  365. CFI_ADJUST_CFA_OFFSET -8
  366. andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
  367. cli
  368. TRACE_IRQS_OFF
  369. jmp int_restore_rest
  370. int_signal:
  371. testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
  372. jz 1f
  373. movq %rsp,%rdi # &ptregs -> arg1
  374. xorl %esi,%esi # oldset -> arg2
  375. call do_notify_resume
  376. 1: movl $_TIF_NEED_RESCHED,%edi
  377. int_restore_rest:
  378. RESTORE_REST
  379. cli
  380. TRACE_IRQS_OFF
  381. jmp int_with_check
  382. CFI_ENDPROC
  383. END(int_ret_from_sys_call)
  384. /*
  385. * Certain special system calls need to save a full stack frame.
  386. */
  387. .macro PTREGSCALL label,func,arg
  388. .globl \label
  389. \label:
  390. leaq \func(%rip),%rax
  391. leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
  392. jmp ptregscall_common
  393. END(\label)
  394. .endm
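/*
 * Each PTREGSCALL stub loads the real handler into %rax and points the
 * chosen argument register at the (not yet completed) pt_regs, then jumps
 * to ptregscall_common below, which saves the remaining registers and
 * fixes up the top of stack before calling the handler.
 */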
  395. CFI_STARTPROC
  396. PTREGSCALL stub_clone, sys_clone, %r8
  397. PTREGSCALL stub_fork, sys_fork, %rdi
  398. PTREGSCALL stub_vfork, sys_vfork, %rdi
  399. PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
  400. PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
  401. PTREGSCALL stub_iopl, sys_iopl, %rsi
  402. ENTRY(ptregscall_common)
  403. popq %r11
  404. CFI_ADJUST_CFA_OFFSET -8
  405. CFI_REGISTER rip, r11
  406. SAVE_REST
  407. movq %r11, %r15
  408. CFI_REGISTER rip, r15
  409. FIXUP_TOP_OF_STACK %r11
  410. call *%rax
  411. RESTORE_TOP_OF_STACK %r11
  412. movq %r15, %r11
  413. CFI_REGISTER rip, r11
  414. RESTORE_REST
  415. pushq %r11
  416. CFI_ADJUST_CFA_OFFSET 8
  417. CFI_REL_OFFSET rip, 0
  418. ret
  419. CFI_ENDPROC
  420. END(ptregscall_common)
  421. ENTRY(stub_execve)
  422. CFI_STARTPROC
  423. popq %r11
  424. CFI_ADJUST_CFA_OFFSET -8
  425. CFI_REGISTER rip, r11
  426. SAVE_REST
  427. FIXUP_TOP_OF_STACK %r11
  428. call sys_execve
  429. RESTORE_TOP_OF_STACK %r11
  430. movq %rax,RAX(%rsp)
  431. RESTORE_REST
  432. jmp int_ret_from_sys_call
  433. CFI_ENDPROC
  434. END(stub_execve)
  435. /*
  436. * sigreturn is special because it needs to restore all registers on return.
  437. * This cannot be done with SYSRET, so use the IRET return path instead.
  438. */
  439. ENTRY(stub_rt_sigreturn)
  440. CFI_STARTPROC
  441. addq $8, %rsp
  442. CFI_ADJUST_CFA_OFFSET -8
  443. SAVE_REST
  444. movq %rsp,%rdi
  445. FIXUP_TOP_OF_STACK %r11
  446. call sys_rt_sigreturn
  447. movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
  448. RESTORE_REST
  449. jmp int_ret_from_sys_call
  450. CFI_ENDPROC
  451. END(stub_rt_sigreturn)
  452. /*
  453. * initial frame state for interrupts and exceptions
  454. */
  455. .macro _frame ref
  456. CFI_STARTPROC simple
  457. CFI_DEF_CFA rsp,SS+8-\ref
  458. /*CFI_REL_OFFSET ss,SS-\ref*/
  459. CFI_REL_OFFSET rsp,RSP-\ref
  460. /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
  461. /*CFI_REL_OFFSET cs,CS-\ref*/
  462. CFI_REL_OFFSET rip,RIP-\ref
  463. .endm
  464. /* initial frame state for interrupts (and exceptions without error code) */
  465. #define INTR_FRAME _frame RIP
  466. /* initial frame state for exceptions with error code (and interrupts with
  467. vector already pushed) */
  468. #define XCPT_FRAME _frame ORIG_RAX
  469. /*
  470. * Interrupt entry/exit.
  471. *
  472. * Interrupt entry points save only callee-clobbered registers on the fast path.
  473. *
  474. * Entry runs with interrupts off.
  475. */
  476. /* 0(%rsp): interrupt number */
  477. .macro interrupt func
  478. cld
  479. SAVE_ARGS
  480. leaq -ARGOFFSET(%rsp),%rdi # arg1 for handler
  481. pushq %rbp
  482. CFI_ADJUST_CFA_OFFSET 8
  483. CFI_REL_OFFSET rbp, 0
  484. movq %rsp,%rbp
  485. CFI_DEF_CFA_REGISTER rbp
  486. testl $3,CS(%rdi)
  487. je 1f
  488. swapgs
  489. 1: incl %gs:pda_irqcount # RED-PEN should check preempt count
  490. cmoveq %gs:pda_irqstackptr,%rsp
  491. push %rbp # backlink for old unwinder
  492. /*
  493. * We entered an interrupt context - irqs are off:
  494. */
  495. TRACE_IRQS_OFF
  496. call \func
  497. .endm
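/*
 * The incl/cmoveq pair above switches to the per-CPU irq stack only for
 * the outermost interrupt (when pda_irqcount increments to zero); nested
 * interrupts keep running on the irq stack they are already on.
 */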
  498. ENTRY(common_interrupt)
  499. XCPT_FRAME
  500. interrupt do_IRQ
  501. /* 0(%rsp): oldrsp-ARGOFFSET */
  502. ret_from_intr:
  503. cli
  504. TRACE_IRQS_OFF
  505. decl %gs:pda_irqcount
  506. leaveq
  507. CFI_DEF_CFA_REGISTER rsp
  508. CFI_ADJUST_CFA_OFFSET -8
  509. exit_intr:
  510. GET_THREAD_INFO(%rcx)
  511. testl $3,CS-ARGOFFSET(%rsp)
  512. je retint_kernel
  513. /* Interrupt came from user space */
  514. /*
  515. * Has a correct top of stack, but a partial stack frame.
  516. * %rcx: thread info. Interrupts off.
  517. */
  518. retint_with_reschedule:
  519. movl $_TIF_WORK_MASK,%edi
  520. retint_check:
  521. movl threadinfo_flags(%rcx),%edx
  522. andl %edi,%edx
  523. CFI_REMEMBER_STATE
  524. jnz retint_careful
  525. retint_swapgs:
  526. /*
  527. * The iretq could re-enable interrupts:
  528. */
  529. cli
  530. TRACE_IRQS_IRETQ
  531. swapgs
  532. jmp restore_args
  533. retint_restore_args:
  534. cli
  535. /*
  536. * The iretq could re-enable interrupts:
  537. */
  538. TRACE_IRQS_IRETQ
  539. restore_args:
  540. RESTORE_ARGS 0,8,0
  541. iret_label:
  542. iretq
  543. .section __ex_table,"a"
  544. .quad iret_label,bad_iret
  545. .previous
  546. .section .fixup,"ax"
  547. /* force a signal here? this matches i386 behaviour */
  548. /* running with kernel gs */
  549. bad_iret:
  550. movq $11,%rdi /* SIGSEGV */
  551. TRACE_IRQS_ON
  552. sti
  553. jmp do_exit
  554. .previous
  555. /* edi: workmask, edx: work */
  556. retint_careful:
  557. CFI_RESTORE_STATE
  558. bt $TIF_NEED_RESCHED,%edx
  559. jnc retint_signal
  560. TRACE_IRQS_ON
  561. sti
  562. pushq %rdi
  563. CFI_ADJUST_CFA_OFFSET 8
  564. call schedule
  565. popq %rdi
  566. CFI_ADJUST_CFA_OFFSET -8
  567. GET_THREAD_INFO(%rcx)
  568. cli
  569. TRACE_IRQS_OFF
  570. jmp retint_check
  571. retint_signal:
  572. testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
  573. jz retint_swapgs
  574. TRACE_IRQS_ON
  575. sti
  576. SAVE_REST
  577. movq $-1,ORIG_RAX(%rsp)
  578. xorl %esi,%esi # oldset
  579. movq %rsp,%rdi # &pt_regs
  580. call do_notify_resume
  581. RESTORE_REST
  582. cli
  583. TRACE_IRQS_OFF
  584. movl $_TIF_NEED_RESCHED,%edi
  585. GET_THREAD_INFO(%rcx)
  586. jmp retint_check
  587. #ifdef CONFIG_PREEMPT
  588. /* Returning to kernel space. Check if we need preemption */
  589. /* rcx: threadinfo. interrupts off. */
  590. ENTRY(retint_kernel)
  591. cmpl $0,threadinfo_preempt_count(%rcx)
  592. jnz retint_restore_args
  593. bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
  594. jnc retint_restore_args
  595. bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
  596. jnc retint_restore_args
  597. call preempt_schedule_irq
  598. jmp exit_intr
  599. #endif
  600. CFI_ENDPROC
  601. END(common_interrupt)
  602. /*
  603. * APIC interrupts.
  604. */
  605. .macro apicinterrupt num,func
  606. INTR_FRAME
  607. pushq $~(\num)
  608. CFI_ADJUST_CFA_OFFSET 8
  609. interrupt \func
  610. jmp ret_from_intr
  611. CFI_ENDPROC
  612. .endm
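/*
 * The vector is pushed complemented (~num); the value in the orig_rax slot
 * is therefore negative and cannot be mistaken for a syscall number, and
 * the C interrupt code undoes the complement to recover the vector.
 */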
  613. ENTRY(thermal_interrupt)
  614. apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
  615. END(thermal_interrupt)
  616. ENTRY(threshold_interrupt)
  617. apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
  618. END(threshold_interrupt)
  619. #ifdef CONFIG_SMP
  620. ENTRY(reschedule_interrupt)
  621. apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
  622. END(reschedule_interrupt)
  623. .macro INVALIDATE_ENTRY num
  624. ENTRY(invalidate_interrupt\num)
  625. apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
  626. END(invalidate_interrupt\num)
  627. .endm
  628. INVALIDATE_ENTRY 0
  629. INVALIDATE_ENTRY 1
  630. INVALIDATE_ENTRY 2
  631. INVALIDATE_ENTRY 3
  632. INVALIDATE_ENTRY 4
  633. INVALIDATE_ENTRY 5
  634. INVALIDATE_ENTRY 6
  635. INVALIDATE_ENTRY 7
  636. ENTRY(call_function_interrupt)
  637. apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
  638. END(call_function_interrupt)
  639. #endif
  640. ENTRY(apic_timer_interrupt)
  641. apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
  642. END(apic_timer_interrupt)
  643. ENTRY(error_interrupt)
  644. apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
  645. END(error_interrupt)
  646. ENTRY(spurious_interrupt)
  647. apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
  648. END(spurious_interrupt)
  649. /*
  650. * Exception entry points.
  651. */
  652. .macro zeroentry sym
  653. INTR_FRAME
  654. pushq $0 /* push error code/oldrax */
  655. CFI_ADJUST_CFA_OFFSET 8
  656. pushq %rax /* push real oldrax to the rdi slot */
  657. CFI_ADJUST_CFA_OFFSET 8
  658. leaq \sym(%rip),%rax
  659. jmp error_entry
  660. CFI_ENDPROC
  661. .endm
  662. .macro errorentry sym
  663. XCPT_FRAME
  664. pushq %rax
  665. CFI_ADJUST_CFA_OFFSET 8
  666. leaq \sym(%rip),%rax
  667. jmp error_entry
  668. CFI_ENDPROC
  669. .endm
  670. /* error code is on the stack already */
  671. /* handle NMI-like exceptions that can happen anywhere */
  672. .macro paranoidentry sym, ist=0, irqtrace=1
  673. SAVE_ALL
  674. cld
  675. movl $1,%ebx
  676. movl $MSR_GS_BASE,%ecx
  677. rdmsr
  678. testl %edx,%edx
  679. js 1f
  680. swapgs
  681. xorl %ebx,%ebx
  682. 1:
  683. .if \ist
  684. movq %gs:pda_data_offset, %rbp
  685. .endif
  686. movq %rsp,%rdi
  687. movq ORIG_RAX(%rsp),%rsi
  688. movq $-1,ORIG_RAX(%rsp)
  689. .if \ist
  690. subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
  691. .endif
  692. call \sym
  693. .if \ist
  694. addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
  695. .endif
  696. cli
  697. .if \irqtrace
  698. TRACE_IRQS_OFF
  699. .endif
  700. .endm
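/*
 * On return from paranoidentry, %ebx records whether swapgs was done on
 * entry: 1 means we arrived with the kernel gs base already loaded (no
 * swapgs needed on exit), 0 means we swapped and must swap back. The
 * paranoid exit paths below test %ebx to decide.
 */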
  701. /*
  702. * "Paranoid" exit path from exception stack.
  703. * Paranoid because this is used by NMIs and cannot take
  704. * any kernel state for granted.
  705. * We don't do kernel preemption checks here, because only the
  706. * NMI case should be common, and NMIs do not enable IRQs and
  707. * cannot get reschedule ticks.
  708. *
  709. * "trace" is 0 for the NMI handler only, because irq-tracing
  710. * is fundamentally NMI-unsafe. (we cannot change the soft and
  711. * hard flags at once, atomically)
  712. */
  713. .macro paranoidexit trace=1
  714. /* ebx: no swapgs flag */
  715. paranoid_exit\trace:
  716. testl %ebx,%ebx /* swapgs needed? */
  717. jnz paranoid_restore\trace
  718. testl $3,CS(%rsp)
  719. jnz paranoid_userspace\trace
  720. paranoid_swapgs\trace:
  721. TRACE_IRQS_IRETQ 0
  722. swapgs
  723. paranoid_restore\trace:
  724. RESTORE_ALL 8
  725. iretq
  726. paranoid_userspace\trace:
  727. GET_THREAD_INFO(%rcx)
  728. movl threadinfo_flags(%rcx),%ebx
  729. andl $_TIF_WORK_MASK,%ebx
  730. jz paranoid_swapgs\trace
  731. movq %rsp,%rdi /* &pt_regs */
  732. call sync_regs
  733. movq %rax,%rsp /* switch stack for scheduling */
  734. testl $_TIF_NEED_RESCHED,%ebx
  735. jnz paranoid_schedule\trace
  736. movl %ebx,%edx /* arg3: thread flags */
  737. .if \trace
  738. TRACE_IRQS_ON
  739. .endif
  740. sti
  741. xorl %esi,%esi /* arg2: oldset */
  742. movq %rsp,%rdi /* arg1: &pt_regs */
  743. call do_notify_resume
  744. cli
  745. .if \trace
  746. TRACE_IRQS_OFF
  747. .endif
  748. jmp paranoid_userspace\trace
  749. paranoid_schedule\trace:
  750. .if \trace
  751. TRACE_IRQS_ON
  752. .endif
  753. sti
  754. call schedule
  755. cli
  756. .if \trace
  757. TRACE_IRQS_OFF
  758. .endif
  759. jmp paranoid_userspace\trace
  760. CFI_ENDPROC
  761. .endm
  762. /*
  763. * Exception entry point. This expects an error code/orig_rax on the stack
  764. * and the exception handler in %rax.
  765. */
  766. ENTRY(error_entry)
  767. _frame RDI
  768. /* rdi slot contains rax, oldrax contains error code */
  769. cld
  770. subq $14*8,%rsp
  771. CFI_ADJUST_CFA_OFFSET (14*8)
  772. movq %rsi,13*8(%rsp)
  773. CFI_REL_OFFSET rsi,RSI
  774. movq 14*8(%rsp),%rsi /* load rax from rdi slot */
  775. movq %rdx,12*8(%rsp)
  776. CFI_REL_OFFSET rdx,RDX
  777. movq %rcx,11*8(%rsp)
  778. CFI_REL_OFFSET rcx,RCX
  779. movq %rsi,10*8(%rsp) /* store rax */
  780. CFI_REL_OFFSET rax,RAX
  781. movq %r8, 9*8(%rsp)
  782. CFI_REL_OFFSET r8,R8
  783. movq %r9, 8*8(%rsp)
  784. CFI_REL_OFFSET r9,R9
  785. movq %r10,7*8(%rsp)
  786. CFI_REL_OFFSET r10,R10
  787. movq %r11,6*8(%rsp)
  788. CFI_REL_OFFSET r11,R11
  789. movq %rbx,5*8(%rsp)
  790. CFI_REL_OFFSET rbx,RBX
  791. movq %rbp,4*8(%rsp)
  792. CFI_REL_OFFSET rbp,RBP
  793. movq %r12,3*8(%rsp)
  794. CFI_REL_OFFSET r12,R12
  795. movq %r13,2*8(%rsp)
  796. CFI_REL_OFFSET r13,R13
  797. movq %r14,1*8(%rsp)
  798. CFI_REL_OFFSET r14,R14
  799. movq %r15,(%rsp)
  800. CFI_REL_OFFSET r15,R15
  801. xorl %ebx,%ebx
  802. testl $3,CS(%rsp)
  803. je error_kernelspace
  804. error_swapgs:
  805. swapgs
  806. error_sti:
  807. movq %rdi,RDI(%rsp)
  808. movq %rsp,%rdi
  809. movq ORIG_RAX(%rsp),%rsi /* get error code */
  810. movq $-1,ORIG_RAX(%rsp)
  811. call *%rax
  812. /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
  813. error_exit:
  814. movl %ebx,%eax
  815. RESTORE_REST
  816. cli
  817. TRACE_IRQS_OFF
  818. GET_THREAD_INFO(%rcx)
  819. testl %eax,%eax
  820. jne retint_kernel
  821. movl threadinfo_flags(%rcx),%edx
  822. movl $_TIF_WORK_MASK,%edi
  823. andl %edi,%edx
  824. jnz retint_careful
  825. /*
  826. * The iret might restore flags:
  827. */
  828. TRACE_IRQS_IRETQ
  829. swapgs
  830. RESTORE_ARGS 0,8,0
  831. jmp iret_label
  832. CFI_ENDPROC
  833. error_kernelspace:
  834. incl %ebx
  835. /* There are two places in the kernel that can potentially fault with
  836. usergs. Handle them here. The exception handlers after
  837. iret run with kernel gs again, so don't set the user space flag.
  838. B stepping K8s sometimes report a truncated RIP for IRET
  839. exceptions returning to compat mode. Check for these here too. */
  840. leaq iret_label(%rip),%rbp
  841. cmpq %rbp,RIP(%rsp)
  842. je error_swapgs
  843. movl %ebp,%ebp /* zero extend */
  844. cmpq %rbp,RIP(%rsp)
  845. je error_swapgs
  846. cmpq $gs_change,RIP(%rsp)
  847. je error_swapgs
  848. jmp error_sti
  849. END(error_entry)
  850. /* Reload gs selector with exception handling */
  851. /* edi: new selector */
  852. ENTRY(load_gs_index)
  853. CFI_STARTPROC
  854. pushf
  855. CFI_ADJUST_CFA_OFFSET 8
  856. cli
  857. swapgs
  858. gs_change:
  859. movl %edi,%gs
  860. 2: mfence /* workaround */
  861. swapgs
  862. popf
  863. CFI_ADJUST_CFA_OFFSET -8
  864. ret
  865. CFI_ENDPROC
  866. ENDPROC(load_gs_index)
  867. .section __ex_table,"a"
  868. .align 8
  869. .quad gs_change,bad_gs
  870. .previous
  871. .section .fixup,"ax"
  872. /* running with kernelgs */
  873. bad_gs:
  874. swapgs /* switch back to user gs */
  875. xorl %eax,%eax
  876. movl %eax,%gs
  877. jmp 2b
  878. .previous
  879. /*
  880. * Create a kernel thread.
  881. *
  882. * C extern interface:
  883. * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
  884. *
  885. * asm input arguments:
  886. * rdi: fn, rsi: arg, rdx: flags
  887. */
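/*
 * Typical use (illustrative sketch only; my_thread_fn is a placeholder):
 *
 *	pid = kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 *
 * The return value is the new thread's pid, or a negative errno if
 * do_fork() fails.
 */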
  888. ENTRY(kernel_thread)
  889. CFI_STARTPROC
  890. FAKE_STACK_FRAME $child_rip
  891. SAVE_ALL
  892. # rdi: flags, rsi: usp, rdx: will be &pt_regs
  893. movq %rdx,%rdi
  894. orq kernel_thread_flags(%rip),%rdi
  895. movq $-1, %rsi
  896. movq %rsp, %rdx
  897. xorl %r8d,%r8d
  898. xorl %r9d,%r9d
  899. # clone now
  900. call do_fork
  901. movq %rax,RAX(%rsp)
  902. xorl %edi,%edi
  903. /*
  904. * It isn't worth checking for a reschedule here,
  905. * so internally to the x86_64 port you can rely on kernel_thread()
  906. * not rescheduling the child before returning; this avoids the need
  907. * for hacks, for example to fork off the per-CPU idle tasks.
  908. * [Hopefully no generic code relies on the reschedule -AK]
  909. */
  910. RESTORE_ALL
  911. UNFAKE_STACK_FRAME
  912. ret
  913. CFI_ENDPROC
  914. ENDPROC(kernel_thread)
  915. child_rip:
  916. pushq $0 # fake return address
  917. CFI_STARTPROC
  918. /*
  919. * Here we are in the child and the registers are set as they were
  920. * at kernel_thread() invocation in the parent.
  921. */
  922. movq %rdi, %rax
  923. movq %rsi, %rdi
  924. call *%rax
  925. # exit
  926. xorl %edi, %edi
  927. call do_exit
  928. CFI_ENDPROC
  929. ENDPROC(child_rip)
  930. /*
  931. * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
  932. *
  933. * C extern interface:
  934. * extern long execve(char *name, char **argv, char **envp)
  935. *
  936. * asm input arguments:
  937. * rdi: name, rsi: argv, rdx: envp
  938. *
  939. * We want to fall back into:
  940. * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
  941. *
  942. * do_sys_execve asm fallback arguments:
  943. * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
  944. */
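/*
 * On success sys_execve() returns 0 and the new register state it set up
 * in the fake frame must be loaded with the IRET path, so we branch to
 * int_ret_from_sys_call; on failure the fake frame is simply undone and
 * we return to the caller with the error code in rax.
 */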
  945. ENTRY(execve)
  946. CFI_STARTPROC
  947. FAKE_STACK_FRAME $0
  948. SAVE_ALL
  949. call sys_execve
  950. movq %rax, RAX(%rsp)
  951. RESTORE_REST
  952. testq %rax,%rax
  953. je int_ret_from_sys_call
  954. RESTORE_ARGS
  955. UNFAKE_STACK_FRAME
  956. ret
  957. CFI_ENDPROC
  958. ENDPROC(execve)
  959. KPROBE_ENTRY(page_fault)
  960. errorentry do_page_fault
  961. END(page_fault)
  962. .previous .text
  963. ENTRY(coprocessor_error)
  964. zeroentry do_coprocessor_error
  965. END(coprocessor_error)
  966. ENTRY(simd_coprocessor_error)
  967. zeroentry do_simd_coprocessor_error
  968. END(simd_coprocessor_error)
  969. ENTRY(device_not_available)
  970. zeroentry math_state_restore
  971. END(device_not_available)
  972. /* runs on exception stack */
  973. KPROBE_ENTRY(debug)
  974. INTR_FRAME
  975. pushq $0
  976. CFI_ADJUST_CFA_OFFSET 8
  977. paranoidentry do_debug, DEBUG_STACK
  978. paranoidexit
  979. END(debug)
  980. .previous .text
  981. /* runs on exception stack */
  982. KPROBE_ENTRY(nmi)
  983. INTR_FRAME
  984. pushq $-1
  985. CFI_ADJUST_CFA_OFFSET 8
  986. paranoidentry do_nmi, 0, 0
  987. #ifdef CONFIG_TRACE_IRQFLAGS
  988. paranoidexit 0
  989. #else
  990. jmp paranoid_exit1
  991. CFI_ENDPROC
  992. #endif
  993. END(nmi)
  994. .previous .text
  995. KPROBE_ENTRY(int3)
  996. INTR_FRAME
  997. pushq $0
  998. CFI_ADJUST_CFA_OFFSET 8
  999. paranoidentry do_int3, DEBUG_STACK
  1000. jmp paranoid_exit1
  1001. CFI_ENDPROC
  1002. END(int3)
  1003. .previous .text
  1004. ENTRY(overflow)
  1005. zeroentry do_overflow
  1006. END(overflow)
  1007. ENTRY(bounds)
  1008. zeroentry do_bounds
  1009. END(bounds)
  1010. ENTRY(invalid_op)
  1011. zeroentry do_invalid_op
  1012. END(invalid_op)
  1013. ENTRY(coprocessor_segment_overrun)
  1014. zeroentry do_coprocessor_segment_overrun
  1015. END(coprocessor_segment_overrun)
  1016. ENTRY(reserved)
  1017. zeroentry do_reserved
  1018. END(reserved)
  1019. /* runs on exception stack */
  1020. ENTRY(double_fault)
  1021. XCPT_FRAME
  1022. paranoidentry do_double_fault
  1023. jmp paranoid_exit1
  1024. CFI_ENDPROC
  1025. END(double_fault)
  1026. ENTRY(invalid_TSS)
  1027. errorentry do_invalid_TSS
  1028. END(invalid_TSS)
  1029. ENTRY(segment_not_present)
  1030. errorentry do_segment_not_present
  1031. END(segment_not_present)
  1032. /* runs on exception stack */
  1033. ENTRY(stack_segment)
  1034. XCPT_FRAME
  1035. paranoidentry do_stack_segment
  1036. jmp paranoid_exit1
  1037. CFI_ENDPROC
  1038. END(stack_segment)
  1039. KPROBE_ENTRY(general_protection)
  1040. errorentry do_general_protection
  1041. END(general_protection)
  1042. .previous .text
  1043. ENTRY(alignment_check)
  1044. errorentry do_alignment_check
  1045. END(alignment_check)
  1046. ENTRY(divide_error)
  1047. zeroentry do_divide_error
  1048. END(divide_error)
  1049. ENTRY(spurious_interrupt_bug)
  1050. zeroentry do_spurious_interrupt_bug
  1051. END(spurious_interrupt_bug)
  1052. #ifdef CONFIG_X86_MCE
  1053. /* runs on exception stack */
  1054. ENTRY(machine_check)
  1055. INTR_FRAME
  1056. pushq $0
  1057. CFI_ADJUST_CFA_OFFSET 8
  1058. paranoidentry do_machine_check
  1059. jmp paranoid_exit1
  1060. CFI_ENDPROC
  1061. END(machine_check)
  1062. #endif
  1063. /* Call softirq on interrupt stack. Interrupts are off. */
  1064. ENTRY(call_softirq)
  1065. CFI_STARTPROC
  1066. push %rbp
  1067. CFI_ADJUST_CFA_OFFSET 8
  1068. CFI_REL_OFFSET rbp,0
  1069. mov %rsp,%rbp
  1070. CFI_DEF_CFA_REGISTER rbp
  1071. incl %gs:pda_irqcount
  1072. cmove %gs:pda_irqstackptr,%rsp
  1073. push %rbp # backlink for old unwinder
  1074. call __do_softirq
  1075. leaveq
  1076. CFI_DEF_CFA_REGISTER rsp
  1077. CFI_ADJUST_CFA_OFFSET -8
  1078. decl %gs:pda_irqcount
  1079. ret
  1080. CFI_ENDPROC
  1081. ENDPROC(call_softirq)
  1082. #ifdef CONFIG_STACK_UNWIND
  1083. ENTRY(arch_unwind_init_running)
  1084. CFI_STARTPROC
  1085. movq %r15, R15(%rdi)
  1086. movq %r14, R14(%rdi)
  1087. xchgq %rsi, %rdx
  1088. movq %r13, R13(%rdi)
  1089. movq %r12, R12(%rdi)
  1090. xorl %eax, %eax
  1091. movq %rbp, RBP(%rdi)
  1092. movq %rbx, RBX(%rdi)
  1093. movq (%rsp), %rcx
  1094. movq %rax, R11(%rdi)
  1095. movq %rax, R10(%rdi)
  1096. movq %rax, R9(%rdi)
  1097. movq %rax, R8(%rdi)
  1098. movq %rax, RAX(%rdi)
  1099. movq %rax, RCX(%rdi)
  1100. movq %rax, RDX(%rdi)
  1101. movq %rax, RSI(%rdi)
  1102. movq %rax, RDI(%rdi)
  1103. movq %rax, ORIG_RAX(%rdi)
  1104. movq %rcx, RIP(%rdi)
  1105. leaq 8(%rsp), %rcx
  1106. movq $__KERNEL_CS, CS(%rdi)
  1107. movq %rax, EFLAGS(%rdi)
  1108. movq %rcx, RSP(%rdi)
  1109. movq $__KERNEL_DS, SS(%rdi)
  1110. jmpq *%rdx
  1111. CFI_ENDPROC
  1112. ENDPROC(arch_unwind_init_running)
  1113. #endif