/*
 *  arch/s390/kernel/entry.S
 *    S390 low-level entry points.
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Hartmut Penner (hp@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>

/*
 * Stack layout for the system_call stack entry.
 * The first few entries are identical to the user_regs_struct.
 */
SP_PTREGS    = STACK_FRAME_OVERHEAD
SP_ARGS      = STACK_FRAME_OVERHEAD + __PT_ARGS
SP_PSW       = STACK_FRAME_OVERHEAD + __PT_PSW
SP_R0        = STACK_FRAME_OVERHEAD + __PT_GPRS
SP_R1        = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
SP_R2        = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
SP_R3        = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
SP_R4        = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
SP_R5        = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
SP_R6        = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
SP_R7        = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
SP_R8        = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
SP_R9        = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
SP_R10       = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
SP_R11       = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
SP_R12       = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
SP_R13       = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
SP_R14       = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
SP_R15       = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
SP_ORIG_R2   = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
SP_ILC       = STACK_FRAME_OVERHEAD + __PT_ILC
SP_TRAP      = STACK_FRAME_OVERHEAD + __PT_TRAP
SP_SIZE      = STACK_FRAME_OVERHEAD + __PT_SIZE

_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING)

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT

#define BASED(name) name-system_call(%r13)
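
/*
 * Note: 31-bit code has no PC-relative addressing for data, so %r13 is
 * kept pointing at system_call (loaded from the address half of the SVC
 * new PSW) and every literal is reached base+displacement relative to
 * that label.  The 12-bit displacement limits everything addressed via
 * BASED() to the 4K following system_call.
 */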
#ifdef CONFIG_TRACE_IRQFLAGS
        .macro  TRACE_IRQS_ON
        l       %r1,BASED(.Ltrace_irq_on)
        basr    %r14,%r1
        .endm

        .macro  TRACE_IRQS_OFF
        l       %r1,BASED(.Ltrace_irq_off)
        basr    %r14,%r1
        .endm
#else
#define TRACE_IRQS_ON
#define TRACE_IRQS_OFF
#endif

/*
 * Register usage in interrupt handlers:
 *    R9  - pointer to current task structure
 *    R13 - pointer to literal pool
 *    R14 - return register for function calls
 *    R15 - kernel stack pointer
 */

        .macro  STORE_TIMER lc_offset
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        stpt    \lc_offset
#endif
        .endm

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .macro  UPDATE_VTIME lc_from,lc_to,lc_sum
        lm      %r10,%r11,\lc_from
        sl      %r10,\lc_to
        sl      %r11,\lc_to+4
        bc      3,BASED(0f)
        sl      %r10,BASED(.Lc_1)
0:      al      %r10,\lc_sum
        al      %r11,\lc_sum+4
        bc      12,BASED(1f)
        al      %r10,BASED(.Lc_1)
1:      stm     %r10,%r11,\lc_sum
        .endm
#endif
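
/*
 * Note: UPDATE_VTIME does 64-bit arithmetic with 32-bit instructions:
 * the two SLs compute <from> - <to>, with "bc 3" (branch on carry, i.e.
 * no borrow from the low word) deciding whether to subtract a borrow
 * from the high word; the two ALs then add the delta to <sum>, with
 * "bc 12" (branch on no carry) deciding whether to carry into the
 * high word.
 */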
        .macro  SAVE_ALL_BASE savearea
        stm     %r12,%r15,\savearea
        l       %r13,__LC_SVC_NEW_PSW+4 # load &system_call to %r13
        .endm

        .macro  SAVE_ALL_SYNC psworg,savearea
        la      %r12,\psworg
        tm      \psworg+1,0x01          # test problem state bit
        bz      BASED(2f)               # skip stack setup save
        l       %r15,__LC_KERNEL_STACK  # problem state -> load ksp
#ifdef CONFIG_CHECK_STACK
        b       BASED(3f)
2:      tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
        bz      BASED(stack_overflow)
3:
#endif
2:
        .endm

        .macro  SAVE_ALL_ASYNC psworg,savearea
        la      %r12,\psworg
        tm      \psworg+1,0x01          # test problem state bit
        bnz     BASED(1f)               # from user -> load async stack
        clc     \psworg+4(4),BASED(.Lcritical_end)
        bhe     BASED(0f)
        clc     \psworg+4(4),BASED(.Lcritical_start)
        bl      BASED(0f)
        l       %r14,BASED(.Lcleanup_critical)
        basr    %r14,%r14
        tm      1(%r12),0x01            # retest problem state after cleanup
        bnz     BASED(1f)
0:      l       %r14,__LC_ASYNC_STACK   # are we already on the async stack ?
        slr     %r14,%r15
        sra     %r14,STACK_SHIFT
        be      BASED(2f)
1:      l       %r15,__LC_ASYNC_STACK
#ifdef CONFIG_CHECK_STACK
        b       BASED(3f)
2:      tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
        bz      BASED(stack_overflow)
3:
#endif
2:
        .endm
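
/*
 * Note: if an async interrupt hits kernel code between __critical_start
 * and __critical_end, the interrupted context may be half-way through
 * saving or restoring registers.  cleanup_critical rolls that context
 * forward or backward to a consistent point before the stack choice is
 * made, which is why the problem state bit is retested afterwards.
 */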
        .macro  CREATE_STACK_FRAME psworg,savearea
        s       %r15,BASED(.Lc_spsize)  # make room for registers & psw
        mvc     SP_PSW(8,%r15),0(%r12)  # move user PSW to stack
        la      %r12,\psworg
        st      %r2,SP_ORIG_R2(%r15)    # store original content of gpr 2
        icm     %r12,12,__LC_SVC_ILC
        stm     %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
        st      %r12,SP_ILC(%r15)
        mvc     SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
        la      %r12,0
        st      %r12,__SF_BACKCHAIN(%r15) # clear back chain
        .endm

        .macro  RESTORE_ALL psworg,sync
        mvc     \psworg(8),SP_PSW(%r15) # move user PSW to lowcore
        .if !\sync
        ni      \psworg+1,0xfd          # clear wait state bit
        .endif
        lm      %r0,%r15,SP_R0(%r15)    # load gprs 0-15 of user
        STORE_TIMER __LC_EXIT_TIMER
        lpsw    \psworg                 # back to caller
        .endm
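
/*
 * Note: RESTORE_ALL ends with lpsw, which reloads the complete PSW
 * (addressing mode, interrupt masks and instruction address) in one
 * operation, so the return to the interrupted context is atomic.
 */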
/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
        .globl  __switch_to
__switch_to:
        basr    %r1,0
__switch_to_base:
        tm      __THREAD_per(%r3),0xe8          # new process is using per ?
        bz      __switch_to_noper-__switch_to_base(%r1)  # if not we're fine
        stctl   %c9,%c11,__SF_EMPTY(%r15)       # We are using per stuff
        clc     __THREAD_per(12,%r3),__SF_EMPTY(%r15)
        be      __switch_to_noper-__switch_to_base(%r1)  # we got away w/o bashing TLB's
        lctl    %c9,%c11,__THREAD_per(%r3)      # Nope we didn't
__switch_to_noper:
        l       %r4,__THREAD_info(%r2)          # get thread_info of prev
        tm      __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
        bz      __switch_to_no_mcck-__switch_to_base(%r1)
        ni      __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev
        l       %r4,__THREAD_info(%r3)          # get thread_info of next
        oi      __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next
__switch_to_no_mcck:
        stm     %r6,%r15,__SF_GPRS(%r15)        # store __switch_to registers of prev task
        st      %r15,__THREAD_ksp(%r2)          # store kernel stack to prev->tss.ksp
        l       %r15,__THREAD_ksp(%r3)          # load kernel stack from next->tss.ksp
        lm      %r6,%r15,__SF_GPRS(%r15)        # load __switch_to registers of next task
        st      %r3,__LC_CURRENT                # __LC_CURRENT = current task struct
        lctl    %c4,%c4,__TASK_pid(%r3)         # load pid to control reg. 4
        l       %r3,__THREAD_info(%r3)          # load thread_info from task struct
        st      %r3,__LC_THREAD_INFO
        ahi     %r3,STACK_SIZE
        st      %r3,__LC_KERNEL_STACK           # __LC_KERNEL_STACK = new kernel stack
        br      %r14
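
/*
 * Note: only %r6-%r15 are saved and restored here; the s390 C calling
 * convention treats %r0-%r5 as call-clobbered, so __switch_to need not
 * preserve them.  A pending machine-check TIF bit migrates from prev to
 * next so it gets handled on return to whichever task runs next.
 */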
__critical_start:
/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */

        .globl  system_call
system_call:
        STORE_TIMER __LC_SYNC_ENTER_TIMER
sysc_saveall:
        SAVE_ALL_BASE __LC_SAVE_AREA
        SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        lh      %r7,0x8a                # get svc number from lowcore
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
sysc_vtime:
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(sysc_do_svc)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
sysc_stime:
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
sysc_update:
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
#endif
sysc_do_svc:
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        sla     %r7,2                   # *4 and test for svc 0
        bnz     BASED(sysc_nr_ok)       # svc number > 0
# svc 0: system call number in %r1
        cl      %r1,BASED(.Lnr_syscalls)
        bnl     BASED(sysc_nr_ok)
        lr      %r7,%r1                 # copy svc number to %r7
        sla     %r7,2                   # *4
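# Note: the svc instruction encodes only an 8-bit number, so svc 0 serves
# as the indirect convention: the real system call number is passed in
# %r1 and range-checked against NR_syscalls before being used.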
sysc_nr_ok:
        mvc     SP_ARGS(4,%r15),SP_R7(%r15)
sysc_do_restart:
        l       %r8,BASED(.Lsysc_table)
        tm      __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
        l       %r8,0(%r7,%r8)          # get system call addr.
        bnz     BASED(sysc_tracesys)
        basr    %r14,%r8                # call sys_xxxx
        st      %r2,SP_R2(%r15)         # store return value (change R2 on stack)
                                        # ATTENTION: check sys_execve_glue before
                                        # changing anything here !!

sysc_return:
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
        bno     BASED(sysc_leave)
        tm      __TI_flags+3(%r9),_TIF_WORK_SVC
        bnz     BASED(sysc_work)        # there is work to do (signals etc.)
sysc_leave:
        RESTORE_ALL __LC_RETURN_PSW,1

#
# recheck if there is more work to do
#
sysc_work_loop:
        tm      __TI_flags+3(%r9),_TIF_WORK_SVC
        bz      BASED(sysc_leave)       # there is no work to do
#
# One of the work bits is on. Find out which one.
#
sysc_work:
        tm      __TI_flags+3(%r9),_TIF_MCCK_PENDING
        bo      BASED(sysc_mcck_pending)
        tm      __TI_flags+3(%r9),_TIF_NEED_RESCHED
        bo      BASED(sysc_reschedule)
        tm      __TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
        bnz     BASED(sysc_sigpending)
        tm      __TI_flags+3(%r9),_TIF_RESTART_SVC
        bo      BASED(sysc_restart)
        tm      __TI_flags+3(%r9),_TIF_SINGLE_STEP
        bo      BASED(sysc_singlestep)
        b       BASED(sysc_leave)

#
# _TIF_NEED_RESCHED is set, call schedule
#
sysc_reschedule:
        l       %r1,BASED(.Lschedule)
        la      %r14,BASED(sysc_work_loop)
        br      %r1                     # call scheduler
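# Note: "la %r14,BASED(label); br %r1" is the tail-call idiom used all
# through this file: the return address is preset in %r14, so when the
# called C function returns it lands on the label instead of back here.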
#
# _TIF_MCCK_PENDING is set, call handler
#
sysc_mcck_pending:
        l       %r1,BASED(.Ls390_handle_mcck)
        la      %r14,BASED(sysc_work_loop)
        br      %r1                     # TIF bit will be cleared by handler

#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
sysc_sigpending:
        ni      __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        l       %r1,BASED(.Ldo_signal)
        basr    %r14,%r1                # call do_signal
        tm      __TI_flags+3(%r9),_TIF_RESTART_SVC
        bo      BASED(sysc_restart)
        tm      __TI_flags+3(%r9),_TIF_SINGLE_STEP
        bo      BASED(sysc_singlestep)
        b       BASED(sysc_work_loop)

#
# _TIF_RESTART_SVC is set, set up registers and restart svc
#
sysc_restart:
        ni      __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
        l       %r7,SP_R2(%r15)         # load new svc number
        sla     %r7,2
        mvc     SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
        lm      %r2,%r6,SP_R2(%r15)     # load svc arguments
        b       BASED(sysc_do_restart)  # restart svc

#
# _TIF_SINGLE_STEP is set, call do_single_step
#
sysc_singlestep:
        ni      __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
        mvi     SP_TRAP+1(%r15),0x28    # set trap indication to pgm check
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        l       %r1,BASED(.Lhandle_per) # load adr. of per handler
        la      %r14,BASED(sysc_return) # load adr. of system return
        br      %r1                     # branch to do_single_step

#
# call trace before and after sys_call
#
sysc_tracesys:
        l       %r1,BASED(.Ltrace)
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        la      %r3,0
        srl     %r7,2
        st      %r7,SP_R2(%r15)
        basr    %r14,%r1
        clc     SP_R2(4,%r15),BASED(.Lnr_syscalls)
        bnl     BASED(sysc_tracenogo)
        l       %r8,BASED(.Lsysc_table)
        l       %r7,SP_R2(%r15)         # strace might have changed the
        sll     %r7,2                   # system call
        l       %r8,0(%r7,%r8)
sysc_tracego:
        lm      %r3,%r6,SP_R3(%r15)
        l       %r2,SP_ORIG_R2(%r15)
        basr    %r14,%r8                # call sys_xxx
        st      %r2,SP_R2(%r15)         # store return value
sysc_tracenogo:
        tm      __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
        bz      BASED(sysc_return)
        l       %r1,BASED(.Ltrace)
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        la      %r3,1
        la      %r14,BASED(sysc_return)
        br      %r1

#
# a new process exits the kernel with ret_from_fork
#
        .globl  ret_from_fork
ret_from_fork:
        l       %r13,__LC_SVC_NEW_PSW+4
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        tm      SP_PSW+1(%r15),0x01     # forking a kernel thread ?
        bo      BASED(0f)
        st      %r15,SP_R15(%r15)       # store stack pointer for new kthread
0:      l       %r1,BASED(.Lschedtail)
        basr    %r14,%r1
        TRACE_IRQS_ON
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        b       BASED(sysc_return)

#
# clone, fork, vfork, exec and sigreturn need glue,
# because they all expect pt_regs as parameter,
# but are called with different parameter.
# return-address is set up above
#
sys_clone_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        l       %r1,BASED(.Lclone)
        br      %r1                     # branch to sys_clone

sys_fork_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        l       %r1,BASED(.Lfork)
        br      %r1                     # branch to sys_fork

sys_vfork_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        l       %r1,BASED(.Lvfork)
        br      %r1                     # branch to sys_vfork

sys_execve_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        l       %r1,BASED(.Lexecve)
        lr      %r12,%r14               # save return address
        basr    %r14,%r1                # call sys_execve
        ltr     %r2,%r2                 # check if execve failed
        bnz     0(%r12)                 # it did fail -> store result in gpr2
        b       4(%r12)                 # SKIP ST 2,SP_R2(15) after BASR 14,8
                                        # in system_call/sysc_tracesys
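# Note: on success sys_execve has already rewritten the pt_regs for the
# new program, so the glue skips the 4-byte "st %r2,SP_R2(%r15)" at the
# call site (hence the +4) to avoid clobbering the new gpr 2; on failure
# it returns normally, so the error code lands in SP_R2 as usual.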
sys_sigreturn_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
        l       %r1,BASED(.Lsigreturn)
        br      %r1                     # branch to sys_sigreturn

sys_rt_sigreturn_glue:
        la      %r2,SP_PTREGS(%r15)     # load pt_regs as parameter
        l       %r1,BASED(.Lrt_sigreturn)
        br      %r1                     # branch to sys_rt_sigreturn

sys_sigaltstack_glue:
        la      %r4,SP_PTREGS(%r15)     # load pt_regs as parameter
        l       %r1,BASED(.Lsigaltstack)
        br      %r1                     # branch to sys_sigaltstack

/*
 * Program check handler routine
 */
        .globl  pgm_check_handler
pgm_check_handler:
/*
 * First we need to check for a special case:
 * Single stepping an instruction that disables the PER event mask will
 * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
 * For a single stepped SVC the program check handler gets control after
 * the SVC new PSW has been loaded. But we want to execute the SVC first and
 * then handle the PER event. Therefore we update the SVC old PSW to point
 * to the pgm_check_handler and branch to the SVC handler after we checked
 * if we have to load the kernel stack register.
 * For every other possible cause for PER event without the PER mask set
 * we just ignore the PER event (FIXME: is there anything we have to do
 * for LPSW?).
 */
        STORE_TIMER __LC_SYNC_ENTER_TIMER
        SAVE_ALL_BASE __LC_SAVE_AREA
        tm      __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
        bnz     BASED(pgm_per)          # got per exception -> special case
        SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(pgm_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime:
#endif
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        l       %r3,__LC_PGM_ILC        # load program interruption code
        la      %r8,0x7f
        nr      %r8,%r3
pgm_do_call:
        l       %r7,BASED(.Ljump_table)
        sll     %r8,2
        l       %r7,0(%r8,%r7)          # load address of handler routine
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        la      %r14,BASED(sysc_return)
        br      %r7                     # branch to interrupt-handler

#
# handle per exception
#
pgm_per:
        tm      __LC_PGM_OLD_PSW,0x40   # test if per event recording is on
        bnz     BASED(pgm_per_std)      # ok, normal per event from user space
# ok, it's one of the special cases, now we need to find out which one
        clc     __LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
        be      BASED(pgm_svcper)
# no interesting special case, ignore PER event
        lm      %r12,%r15,__LC_SAVE_AREA
        lpsw    0x28

#
# Normal per exception
#
pgm_per_std:
        SAVE_ALL_SYNC __LC_PGM_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_PGM_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(pgm_no_vtime2)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime2:
#endif
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        l       %r1,__TI_task(%r9)
        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
        mvc     __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
        oi      __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
        l       %r3,__LC_PGM_ILC        # load program interruption code
        la      %r8,0x7f
        nr      %r8,%r3                 # clear per-event-bit and ilc
        be      BASED(sysc_return)      # only per or per+check ?
        b       BASED(pgm_do_call)

#
# it was a single stepped SVC that is causing all the trouble
#
pgm_svcper:
        SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(pgm_no_vtime3)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
pgm_no_vtime3:
#endif
        lh      %r7,0x8a                # get svc number from lowcore
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        l       %r1,__TI_task(%r9)
        mvc     __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
        mvc     __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
        mvc     __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
        oi      __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
        TRACE_IRQS_ON
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        b       BASED(sysc_do_svc)
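# Note: the PER state is recorded in the thread struct and TIF_SINGLE_STEP
# is set, then control falls into the normal svc path; the step is only
# reported (via sysc_singlestep) after the system call has been executed.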
/*
 * IO interrupt handler routine
 */
        .globl  io_int_handler
io_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck    __LC_INT_CLOCK
        SAVE_ALL_BASE __LC_SAVE_AREA+16
        SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
        CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(io_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
io_no_vtime:
#endif
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        TRACE_IRQS_OFF
        l       %r1,BASED(.Ldo_IRQ)     # load address of do_IRQ
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        basr    %r14,%r1                # branch to standard irq handler
        TRACE_IRQS_ON

io_return:
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
#ifdef CONFIG_PREEMPT
        bno     BASED(io_preempt)       # no -> check for preemptive scheduling
#else
        bno     BASED(io_leave)         # no -> skip resched & signal
#endif
        tm      __TI_flags+3(%r9),_TIF_WORK_INT
        bnz     BASED(io_work)          # there is work to do (signals etc.)
io_leave:
        RESTORE_ALL __LC_RETURN_PSW,0
io_done:

#ifdef CONFIG_PREEMPT
io_preempt:
        icm     %r0,15,__TI_precount(%r9)
        bnz     BASED(io_leave)
        l       %r1,SP_R15(%r15)
        s       %r1,BASED(.Lc_spsize)
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lr      %r15,%r1
io_resume_loop:
        tm      __TI_flags+3(%r9),_TIF_NEED_RESCHED
        bno     BASED(io_leave)
        mvc     __TI_precount(4,%r9),BASED(.Lc_pactive)
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        l       %r1,BASED(.Lschedule)
        basr    %r14,%r1                # call schedule
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        xc      __TI_precount(4,%r9),__TI_precount(%r9)
        b       BASED(io_resume_loop)
#endif

#
# switch to kernel stack, then check the TIF bits
#
io_work:
        l       %r1,__LC_KERNEL_STACK
        s       %r1,BASED(.Lc_spsize)
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lr      %r15,%r1
#
# One of the work bits is on. Find out which one.
# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGMASK, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
#
io_work_loop:
        tm      __TI_flags+3(%r9),_TIF_MCCK_PENDING
        bo      BASED(io_mcck_pending)
        tm      __TI_flags+3(%r9),_TIF_NEED_RESCHED
        bo      BASED(io_reschedule)
        tm      __TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
        bnz     BASED(io_sigpending)
        b       BASED(io_leave)

#
# _TIF_MCCK_PENDING is set, call handler
#
io_mcck_pending:
        l       %r1,BASED(.Ls390_handle_mcck)
        la      %r14,BASED(io_work_loop)
        br      %r1                     # TIF bit will be cleared by handler

#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
        l       %r1,BASED(.Lschedule)
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        basr    %r14,%r1                # call scheduler
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        tm      __TI_flags+3(%r9),_TIF_WORK_INT
        bz      BASED(io_leave)         # there is no work to do
        b       BASED(io_work_loop)

#
# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
#
io_sigpending:
        stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        l       %r1,BASED(.Ldo_signal)
        basr    %r14,%r1                # call do_signal
        stnsm   __SF_EMPTY(%r15),0xfc   # disable I/O and ext. interrupts
        b       BASED(io_work_loop)

/*
 * External interrupt handler routine
 */
        .globl  ext_int_handler
ext_int_handler:
        STORE_TIMER __LC_ASYNC_ENTER_TIMER
        stck    __LC_INT_CLOCK
        SAVE_ALL_BASE __LC_SAVE_AREA+16
        SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
        CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(ext_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
ext_no_vtime:
#endif
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        TRACE_IRQS_OFF
        la      %r2,SP_PTREGS(%r15)     # address of register-save area
        lh      %r3,__LC_EXT_INT_CODE   # get interruption code
        l       %r1,BASED(.Ldo_extint)
        basr    %r14,%r1
        TRACE_IRQS_ON
        b       BASED(io_return)

__critical_end:
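
/*
 * Note: everything between __critical_start and __critical_end runs with
 * state that an async interrupt or machine check could catch half-saved
 * or half-restored; the .Lcritical_start/.Lcritical_end literals below
 * bound the range that cleanup_critical knows how to repair.
 */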
/*
 * Machine check handler routines
 */
        .globl  mcck_int_handler
mcck_int_handler:
        spt     __LC_CPU_TIMER_SAVE_AREA        # revalidate cpu timer
        lm      %r0,%r15,__LC_GPREGS_SAVE_AREA  # revalidate gprs
        SAVE_ALL_BASE __LC_SAVE_AREA+32
        la      %r12,__LC_MCK_OLD_PSW
        tm      __LC_MCCK_CODE,0x80     # system damage?
        bo      BASED(mcck_int_main)    # yes -> rest of mcck code invalid
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        mvc     __LC_SAVE_AREA+52(8),__LC_ASYNC_ENTER_TIMER
        mvc     __LC_ASYNC_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
        tm      __LC_MCCK_CODE+5,0x02   # stored cpu timer value valid?
        bo      BASED(1f)
        la      %r14,__LC_SYNC_ENTER_TIMER
        clc     0(8,%r14),__LC_ASYNC_ENTER_TIMER
        bl      BASED(0f)
        la      %r14,__LC_ASYNC_ENTER_TIMER
0:      clc     0(8,%r14),__LC_EXIT_TIMER
        bl      BASED(0f)
        la      %r14,__LC_EXIT_TIMER
0:      clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
        bl      BASED(0f)
        la      %r14,__LC_LAST_UPDATE_TIMER
0:      spt     0(%r14)
        mvc     __LC_ASYNC_ENTER_TIMER(8),0(%r14)
1:
#endif
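# Note: if the machine check invalidated the stored cpu timer, the code
# above falls back to the smallest of the sync/async enter, exit and last
# update timestamps (the cpu timer counts down, so the smallest value is
# the most recently stored one) and reloads the timer from it.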
        tm      __LC_MCCK_CODE+2,0x09   # mwp + ia of old psw valid?
        bno     BASED(mcck_int_main)    # no -> skip cleanup critical
        tm      __LC_MCK_OLD_PSW+1,0x01 # test problem state bit
        bnz     BASED(mcck_int_main)    # from user -> load async stack
        clc     __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
        bhe     BASED(mcck_int_main)
        clc     __LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
        bl      BASED(mcck_int_main)
        l       %r14,BASED(.Lcleanup_critical)
        basr    %r14,%r14
mcck_int_main:
        l       %r14,__LC_PANIC_STACK   # are we already on the panic stack?
        slr     %r14,%r15
        sra     %r14,PAGE_SHIFT
        be      BASED(0f)
        l       %r15,__LC_PANIC_STACK   # load panic stack
0:      CREATE_STACK_FRAME __LC_MCK_OLD_PSW,__LC_SAVE_AREA+32
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        tm      __LC_MCCK_CODE+2,0x08   # mwp of old psw valid?
        bno     BASED(mcck_no_vtime)    # no -> skip vtime update
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(mcck_no_vtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
mcck_no_vtime:
#endif
        l       %r9,__LC_THREAD_INFO    # load pointer to thread_info struct
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        l       %r1,BASED(.Ls390_mcck)
        basr    %r14,%r1                # call machine check handler
        tm      SP_PSW+1(%r15),0x01     # returning to user ?
        bno     BASED(mcck_return)
        l       %r1,__LC_KERNEL_STACK   # switch to kernel stack
        s       %r1,BASED(.Lc_spsize)
        mvc     SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
        xc      __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
        lr      %r15,%r1
        stosm   __SF_EMPTY(%r15),0x04   # turn dat on
        tm      __TI_flags+3(%r9),_TIF_MCCK_PENDING
        bno     BASED(mcck_return)
        TRACE_IRQS_OFF
        l       %r1,BASED(.Ls390_handle_mcck)
        basr    %r14,%r1                # call machine check handler
        TRACE_IRQS_ON
mcck_return:
        mvc     __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW
        ni      __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        mvc     __LC_ASYNC_ENTER_TIMER(8),__LC_SAVE_AREA+52
        tm      __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
        bno     BASED(0f)
        lm      %r0,%r15,SP_R0(%r15)    # load gprs 0-15
        stpt    __LC_EXIT_TIMER
        lpsw    __LC_RETURN_MCCK_PSW    # back to caller
0:
#endif
        lm      %r0,%r15,SP_R0(%r15)    # load gprs 0-15
        lpsw    __LC_RETURN_MCCK_PSW    # back to caller

        RESTORE_ALL __LC_RETURN_MCCK_PSW,0

#ifdef CONFIG_SMP
/*
 * Restart interruption handler, kick starter for additional CPUs
 */
        .globl  restart_int_handler
restart_int_handler:
        l       %r15,__LC_SAVE_AREA+60  # load ksp
        lctl    %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs
        lam     %a0,%a15,__LC_AREGS_SAVE_AREA
        lm      %r6,%r15,__SF_GPRS(%r15) # load registers from clone
        stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
        basr    %r14,0
        l       %r14,restart_addr-.(%r14)
        br      %r14                    # branch to start_secondary
restart_addr:
        .long   start_secondary
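
/*
 * Note: the restart new PSW runs with DAT off, so the handler first
 * rebuilds its stack pointer, control and access registers from the
 * lowcore save areas, and only then turns DAT on before branching to
 * start_secondary.
 */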
#else
/*
 * If we do not run with SMP enabled, let the new CPU crash ...
 */
        .globl  restart_int_handler
restart_int_handler:
        basr    %r1,0
restart_base:
        lpsw    restart_crash-restart_base(%r1)
        .align  8
restart_crash:
        .long   0x000a0000,0x00000000
restart_go:
#endif

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
        l       %r15,__LC_PANIC_STACK   # change to panic stack
        sl      %r15,BASED(.Lc_spsize)
        mvc     SP_PSW(8,%r15),0(%r12)  # move user PSW to stack
        stm     %r0,%r11,SP_R0(%r15)    # store gprs %r0-%r11 to kernel stack
        la      %r1,__LC_SAVE_AREA
        ch      %r12,BASED(.L0x020)     # old psw addr == __LC_SVC_OLD_PSW ?
        be      BASED(0f)
        ch      %r12,BASED(.L0x028)     # old psw addr == __LC_PGM_OLD_PSW ?
        be      BASED(0f)
        la      %r1,__LC_SAVE_AREA+16
0:      mvc     SP_R12(16,%r15),0(%r1)  # move %r12-%r15 to stack
        xc      __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain
        l       %r1,BASED(1f)           # branch to kernel_stack_overflow
        la      %r2,SP_PTREGS(%r15)     # load pt_regs
        br      %r1
1:      .long   kernel_stack_overflow
#endif

cleanup_table_system_call:
        .long   system_call + 0x80000000, sysc_do_svc + 0x80000000
cleanup_table_sysc_return:
        .long   sysc_return + 0x80000000, sysc_leave + 0x80000000
cleanup_table_sysc_leave:
        .long   sysc_leave + 0x80000000, sysc_work_loop + 0x80000000
cleanup_table_sysc_work_loop:
        .long   sysc_work_loop + 0x80000000, sysc_reschedule + 0x80000000
cleanup_table_io_return:
        .long   io_return + 0x80000000, io_leave + 0x80000000
cleanup_table_io_leave:
        .long   io_leave + 0x80000000, io_done + 0x80000000
cleanup_table_io_work_loop:
        .long   io_work_loop + 0x80000000, io_mcck_pending + 0x80000000
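
/*
 * Note: each cleanup table entry is a [start,end) pair of PSW address
 * words; the + 0x80000000 sets the 31-bit addressing-mode bit so the
 * values compare directly against the address half of a saved PSW.
 * cleanup_critical checks the interrupted address against each range
 * and dispatches to the matching fixup routine.
 */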
cleanup_critical:
        clc     4(4,%r12),BASED(cleanup_table_system_call)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_system_call+4)
        bl      BASED(cleanup_system_call)
0:
        clc     4(4,%r12),BASED(cleanup_table_sysc_return)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_sysc_return+4)
        bl      BASED(cleanup_sysc_return)
0:
        clc     4(4,%r12),BASED(cleanup_table_sysc_leave)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_sysc_leave+4)
        bl      BASED(cleanup_sysc_leave)
0:
        clc     4(4,%r12),BASED(cleanup_table_sysc_work_loop)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
        bl      BASED(cleanup_sysc_return)
0:
        clc     4(4,%r12),BASED(cleanup_table_io_return)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_io_return+4)
        bl      BASED(cleanup_io_return)
0:
        clc     4(4,%r12),BASED(cleanup_table_io_leave)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_io_leave+4)
        bl      BASED(cleanup_io_leave)
0:
        clc     4(4,%r12),BASED(cleanup_table_io_work_loop)
        bl      BASED(0f)
        clc     4(4,%r12),BASED(cleanup_table_io_work_loop+4)
        bl      BASED(cleanup_io_return)
0:
        br      %r14
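
# Note: each fixup below returns with %r12 pointing at __LC_RETURN_PSW,
# which holds the PSW the interrupted context should be resumed with;
# the callers (SAVE_ALL_ASYNC and the machine check handler) then
# restart from that repaired state.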
cleanup_system_call:
        mvc     __LC_RETURN_PSW(8),0(%r12)
        c       %r12,BASED(.Lmck_old_psw)
        be      BASED(0f)
        la      %r12,__LC_SAVE_AREA+16
        b       BASED(1f)
0:      la      %r12,__LC_SAVE_AREA+32
1:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4)
        bh      BASED(0f)
        mvc     __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
0:      clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8)
        bhe     BASED(cleanup_vtime)
#endif
        clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn)
        bh      BASED(0f)
        mvc     __LC_SAVE_AREA(16),0(%r12)
0:      st      %r13,4(%r12)
        st      %r12,__LC_SAVE_AREA+48  # argh
        SAVE_ALL_SYNC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        CREATE_STACK_FRAME __LC_SVC_OLD_PSW,__LC_SAVE_AREA
        l       %r12,__LC_SAVE_AREA+48  # argh
        st      %r15,12(%r12)
        lh      %r7,0x8a
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
cleanup_vtime:
        clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12)
        bhe     BASED(cleanup_stime)
        tm      SP_PSW+1(%r15),0x01     # interrupting from user ?
        bz      BASED(cleanup_novtime)
        UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
cleanup_stime:
        clc     __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16)
        bh      BASED(cleanup_update)
        UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
cleanup_update:
        mvc     __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
cleanup_novtime:
#endif
        mvc     __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4)
        la      %r12,__LC_RETURN_PSW
        br      %r14
cleanup_system_call_insn:
        .long   sysc_saveall + 0x80000000
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .long   system_call + 0x80000000
        .long   sysc_vtime + 0x80000000
        .long   sysc_stime + 0x80000000
        .long   sysc_update + 0x80000000
#endif

cleanup_sysc_return:
        mvc     __LC_RETURN_PSW(4),0(%r12)
        mvc     __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_return)
        la      %r12,__LC_RETURN_PSW
        br      %r14

cleanup_sysc_leave:
        clc     4(4,%r12),BASED(cleanup_sysc_leave_insn)
        be      BASED(2f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        mvc     __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
        clc     4(4,%r12),BASED(cleanup_sysc_leave_insn+4)
        be      BASED(2f)
#endif
        mvc     __LC_RETURN_PSW(8),SP_PSW(%r15)
        c       %r12,BASED(.Lmck_old_psw)
        bne     BASED(0f)
        mvc     __LC_SAVE_AREA+32(16),SP_R12(%r15)
        b       BASED(1f)
0:      mvc     __LC_SAVE_AREA+16(16),SP_R12(%r15)
1:      lm      %r0,%r11,SP_R0(%r15)
        l       %r15,SP_R15(%r15)
2:      la      %r12,__LC_RETURN_PSW
        br      %r14
cleanup_sysc_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .long   sysc_leave + 14 + 0x80000000
#endif
        .long   sysc_leave + 10 + 0x80000000

cleanup_io_return:
        mvc     __LC_RETURN_PSW(4),0(%r12)
        mvc     __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
        la      %r12,__LC_RETURN_PSW
        br      %r14

cleanup_io_leave:
        clc     4(4,%r12),BASED(cleanup_io_leave_insn)
        be      BASED(2f)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        mvc     __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
        clc     4(4,%r12),BASED(cleanup_io_leave_insn+4)
        be      BASED(2f)
#endif
        mvc     __LC_RETURN_PSW(8),SP_PSW(%r15)
        c       %r12,BASED(.Lmck_old_psw)
        bne     BASED(0f)
        mvc     __LC_SAVE_AREA+32(16),SP_R12(%r15)
        b       BASED(1f)
0:      mvc     __LC_SAVE_AREA+16(16),SP_R12(%r15)
1:      lm      %r0,%r11,SP_R0(%r15)
        l       %r15,SP_R15(%r15)
2:      la      %r12,__LC_RETURN_PSW
        br      %r14
cleanup_io_leave_insn:
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        .long   io_leave + 18 + 0x80000000
#endif
        .long   io_leave + 14 + 0x80000000

/*
 * Integer constants
 */
        .align  4
.Lc_spsize:     .long   SP_SIZE
.Lc_overhead:   .long   STACK_FRAME_OVERHEAD
.Lc_pactive:    .long   PREEMPT_ACTIVE
.Lnr_syscalls:  .long   NR_syscalls
.L0x018:        .short  0x018
.L0x020:        .short  0x020
.L0x028:        .short  0x028
.L0x030:        .short  0x030
.L0x038:        .short  0x038
.Lc_1:          .long   1

/*
 * Symbol constants
 */
.Ls390_mcck:    .long   s390_do_machine_check
.Ls390_handle_mcck:
                .long   s390_handle_mcck
.Lmck_old_psw:  .long   __LC_MCK_OLD_PSW
.Ldo_IRQ:       .long   do_IRQ
.Ldo_extint:    .long   do_extint
.Ldo_signal:    .long   do_signal
.Lhandle_per:   .long   do_single_step
.Ljump_table:   .long   pgm_check_table
.Lschedule:     .long   schedule
.Lclone:        .long   sys_clone
.Lexecve:       .long   sys_execve
.Lfork:         .long   sys_fork
.Lrt_sigreturn: .long   sys_rt_sigreturn
.Lrt_sigsuspend:
                .long   sys_rt_sigsuspend
.Lsigreturn:    .long   sys_sigreturn
.Lsigsuspend:   .long   sys_sigsuspend
.Lsigaltstack:  .long   sys_sigaltstack
.Ltrace:        .long   syscall_trace
.Lvfork:        .long   sys_vfork
.Lschedtail:    .long   schedule_tail
.Lsysc_table:   .long   sys_call_table
#ifdef CONFIG_TRACE_IRQFLAGS
.Ltrace_irq_on: .long   trace_hardirqs_on
.Ltrace_irq_off:
                .long   trace_hardirqs_off
#endif
.Lcritical_start:
                .long   __critical_start + 0x80000000
.Lcritical_end:
                .long   __critical_end + 0x80000000
.Lcleanup_critical:
                .long   cleanup_critical

                .section .rodata, "a"
#define SYSCALL(esa,esame,emu)  .long esa
sys_call_table:
#include "syscalls.S"
#undef SYSCALL