@@ -291,38 +291,36 @@ sysc_restore_trace_psw:
 #endif
 
 #
-# recheck if there is more work to do
-#
-sysc_work_loop:
-	tm __TI_flags+7(%r9),_TIF_WORK_SVC
-	jz sysc_restore # there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
 #
 sysc_work:
 	tm SP_PSW+1(%r15),0x01 # returning to user ?
 	jno sysc_restore
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_loop:
 	tm __TI_flags+7(%r9),_TIF_MCCK_PENDING
 	jo sysc_mcck_pending
 	tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
 	jo sysc_reschedule
 	tm __TI_flags+7(%r9),_TIF_SIGPENDING
-	jnz sysc_sigpending
+	jo sysc_sigpending
 	tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME
-	jnz sysc_notify_resume
+	jo sysc_notify_resume
 	tm __TI_flags+7(%r9),_TIF_RESTART_SVC
 	jo sysc_restart
 	tm __TI_flags+7(%r9),_TIF_SINGLE_STEP
 	jo sysc_singlestep
-	j sysc_restore
-sysc_work_done:
+	j sysc_return # beware of critical section cleanup
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
 sysc_reschedule:
 	larl %r14,sysc_work_loop
-	jg schedule # return point is sysc_return
+	jg schedule # return point is sysc_work_loop
 
 #
 # _TIF_MCCK_PENDING is set, call handler
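
For readers who do not parse s390 assembly fluently: after this hunk the syscall-exit path first tests the saved PSW to see whether it is returning to user space, and only then walks the thread-info work bits, one bit per pass, with the loop now ending in j sysc_return (presumably so the dedicated sysc_work_loop entry in the critical-section cleanup table, deleted further down, is no longer needed). The C sketch below only illustrates that dispatch order; the flag values, names and returned labels are invented for the example and are not the kernel's definitions.

#include <stdio.h>

/* Illustrative stand-ins for the real _TIF_* bits; the values are made up. */
enum {
	TIF_MCCK_PENDING  = 1 << 0,
	TIF_NEED_RESCHED  = 1 << 1,
	TIF_SIGPENDING    = 1 << 2,
	TIF_NOTIFY_RESUME = 1 << 3,
	TIF_RESTART_SVC   = 1 << 4,
	TIF_SINGLE_STEP   = 1 << 5,
};

/* sysc_work: do nothing unless we return to user space, then dispatch the
 * highest-priority pending bit in exactly the order of the tm/jo chain. */
static const char *sysc_work(unsigned int ti_flags, int returning_to_user)
{
	if (!returning_to_user)
		return "sysc_restore";		/* jno sysc_restore */
	if (ti_flags & TIF_MCCK_PENDING)
		return "sysc_mcck_pending";
	if (ti_flags & TIF_NEED_RESCHED)
		return "sysc_reschedule";
	if (ti_flags & TIF_SIGPENDING)
		return "sysc_sigpending";
	if (ti_flags & TIF_NOTIFY_RESUME)
		return "sysc_notify_resume";
	if (ti_flags & TIF_RESTART_SVC)
		return "sysc_restart";
	if (ti_flags & TIF_SINGLE_STEP)
		return "sysc_singlestep";
	return "sysc_return";			/* no bit left: leave the loop */
}

int main(void)
{
	/* A pending reschedule wins over a pending signal: it is tested first. */
	printf("%s\n", sysc_work(TIF_NEED_RESCHED | TIF_SIGPENDING, 1));
	return 0;
}

The handlers shown in the patch load %r14 with sysc_work_loop, so the bits are re-examined until none is left.
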
@@ -369,7 +367,7 @@ sysc_singlestep:
 	ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
 	xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
 	la %r2,SP_PTREGS(%r15) # address of register-save area
-	larl %r14,sysc_return # load adr. of system return
+	larl %r14,sysc_work_loop # load adr. of system return
 	jg do_single_step # branch to do_sigtrap
 
 #
@@ -605,37 +603,27 @@ io_restore_trace_psw:
 #endif
 
 #
-# There is work todo, we need to check if we return to userspace, then
-# check, if we are in SIE, if yes leave it
+# There is work to do, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and kvm is enabled check if we need to
+#    modify the psw to leave SIE
+# 3) if we return to kernel code and preemptive scheduling is enabled check
+#    the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
 #
 io_work:
 	tm SP_PSW+1(%r15),0x01 # returning to user ?
-#ifndef CONFIG_PREEMPT
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-	jnz io_work_user # yes -> no need to check for SIE
-	la %r1, BASED(sie_opcode) # we return to kernel here
-	lg %r2, SP_PSW+8(%r15)
-	clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
-	jne io_restore # no-> return to kernel
-	lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
-	aghi %r1, 4
-	stg %r1, SP_PSW+8(%r15)
-	j io_restore # return to kernel
-#else
-	jno io_restore # no-> skip resched & signal
-#endif
-#else
-	jnz io_work_user # yes -> do resched & signal
+	jo io_work_user # yes -> do resched & signal
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-	la %r1, BASED(sie_opcode)
-	lg %r2, SP_PSW+8(%r15)
-	clc 0(2,%r1), 0(%r2) # is current instruction = SIE?
+	lg %r2,SP_PSW+8(%r15) # check if current instruction is SIE
+	lh %r1,0(%r2)
+	chi %r1,-19948 # signed 16 bit compare with 0xb214
 	jne 0f # no -> leave PSW alone
-	lg %r1, SP_PSW+8(%r15) # yes-> add 4 bytes to leave SIE
-	aghi %r1, 4
-	stg %r1, SP_PSW+8(%r15)
+	aghi %r2,4 # yes-> add 4 bytes to leave SIE
+	stg %r2,SP_PSW+8(%r15)
 0:
 #endif
+#ifdef CONFIG_PREEMPT
 # check for preemptive scheduling
 	icm %r0,15,__TI_precount(%r9)
 	jnz io_restore # preemption is disabled
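
The comment block at the top of this hunk names three cases for io_work. As a reading aid, here is a rough C model of that decision; every type and name in it is illustrative, the CONFIG_PREEMPT conditional mirrors the #ifdef added above (compile with -DCONFIG_PREEMPT to see that branch), and the while loop stands in for the io_resume_loop reworked in the next hunk.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative state; none of these names are kernel API. */
struct exit_state {
	bool user_mode;		/* problem-state bit of the saved PSW */
	bool at_sie;		/* saved PSW points at a SIE opcode (KVM case) */
	bool need_resched;	/* _TIF_NEED_RESCHED */
	int  preempt_count;	/* __TI_precount */
};

static const char *io_work(struct exit_state *s, unsigned long *psw_addr)
{
	if (s->user_mode)
		return "io_work_user";	/* switch stacks, run the work loop */

	/* We interrupted kernel code. */
	if (s->at_sie)
		*psw_addr += 4;		/* resume after the SIE instruction */

#ifdef CONFIG_PREEMPT
	if (s->preempt_count == 0) {
		while (s->need_resched) {
			/* io_resume_loop: call preempt_schedule_irq(), then
			 * re-test the flag; cleared here to end the example. */
			s->need_resched = false;
		}
	}
#endif
	return "io_restore";
}

int main(void)
{
	unsigned long psw = 0x1000;
	struct exit_state s = { .user_mode = false, .at_sie = true };

	printf("%s, psw now 0x%lx\n", io_work(&s, &psw), psw);
	return 0;
}

Note that in the kernel-mode case the PSW fix-up and the preemption check are independent steps; the sketch therefore falls through from the SIE adjustment to the preemption test instead of returning early.
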
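
One detail of the new SIE test is worth spelling out: lh loads a halfword and sign-extends it, and chi sign-extends its 16-bit immediate before the compare, so the comparison value for the SIE opcode 0xb214 has to be written as the negative number -19948. A tiny standard-C check of that arithmetic (it assumes nothing beyond two's-complement 16-bit integers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 0xb214 = 45588; reinterpreted as a signed halfword:
	 * 45588 - 65536 = -19948, the immediate used by chi. */
	int16_t sie_opcode = (int16_t)0xb214;

	assert(sie_opcode == -19948);
	printf("0x%04x as a signed halfword is %d\n",
	       (unsigned int)(uint16_t)sie_opcode, (int)sie_opcode);
	return 0;
}

This is also why the memory-resident sie_opcode literal and the clc can go: an immediate compare against the sign-extended opcode does the same job without a base-relative load.
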
@@ -646,21 +634,25 @@ io_work:
 	xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lgr %r15,%r1
 io_resume_loop:
-	tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
-	jno io_restore
 	larl %r14,io_resume_loop
-	jg preempt_schedule_irq
+	tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
+	jgo preempt_schedule_irq
 #endif
+	j io_restore
 
+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
 io_work_user:
 	lg %r1,__LC_KERNEL_STACK
 	aghi %r1,-SP_SIZE
 	mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
 	xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain
 	lgr %r15,%r1
+
 #
 # One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
 # and _TIF_MCCK_PENDING
 #
 io_work_loop:
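
The io_work_user sequence kept as context in this hunk (lg, aghi, mvc, xc, lgr) carves a fresh frame below the top of the kernel stack, copies the saved registers into it, terminates the back chain and makes the new frame the stack pointer, as the new comment says. The C sketch below mirrors those steps under stated assumptions: the sizes and offsets are placeholders rather than the values generated for the real asm offsets, and the final switch of %r15 is only indicated by the return value.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Placeholder layout values; the real ones come from the asm offsets. */
#define SP_SIZE		512	/* hypothetical stack frame size */
#define SP_PTREGS	160	/* hypothetical offset of the register save area */
#define PT_SIZE		320	/* hypothetical size of the saved registers */
#define SF_BACKCHAIN	0	/* hypothetical offset of the back chain field */

/* io_work_user as three steps: carve a frame, copy pt_regs, cut the chain. */
static void *switch_to_kernel_stack(void *kernel_stack_top, const void *old_frame)
{
	uint8_t *new_frame = (uint8_t *)kernel_stack_top - SP_SIZE;	/* aghi */

	memcpy(new_frame + SP_PTREGS,
	       (const uint8_t *)old_frame + SP_PTREGS, PT_SIZE);	/* mvc  */
	memset(new_frame + SF_BACKCHAIN, 0, 8);				/* xc   */
	return new_frame;				/* loaded into %r15 (lgr) */
}

int main(void)
{
	static uint8_t kernel_stack[4096], old_frame[SP_SIZE];
	void *sp = switch_to_kernel_stack(kernel_stack + sizeof(kernel_stack),
					  old_frame);

	printf("new frame sits %zu bytes below the stack top\n",
	       (size_t)(kernel_stack + sizeof(kernel_stack) - (uint8_t *)sp));
	return 0;
}
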
@@ -669,16 +661,10 @@ io_work_loop:
 	tm __TI_flags+7(%r9),_TIF_NEED_RESCHED
 	jo io_reschedule
 	tm __TI_flags+7(%r9),_TIF_SIGPENDING
-	jnz io_sigpending
+	jo io_sigpending
 	tm __TI_flags+7(%r9),_TIF_NOTIFY_RESUME
-	jnz io_notify_resume
-	j io_restore
-io_work_done:
-
-#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
-sie_opcode:
-	.long 0xb2140000
-#endif
+	jo io_notify_resume
+	j io_return # beware of critical section cleanup
 
 #
 # _TIF_MCCK_PENDING is set, call handler
@@ -696,8 +682,6 @@ io_reschedule:
 	brasl %r14,schedule # call scheduler
 	stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	tm __TI_flags+7(%r9),_TIF_WORK_INT
-	jz io_restore # there is no work to do
 	j io_work_loop
 
 #
@@ -903,14 +887,10 @@ cleanup_table_sysc_return:
 	.quad sysc_return, sysc_leave
 cleanup_table_sysc_leave:
 	.quad sysc_leave, sysc_done
-cleanup_table_sysc_work_loop:
-	.quad sysc_work_loop, sysc_work_done
 cleanup_table_io_return:
 	.quad io_return, io_leave
 cleanup_table_io_leave:
 	.quad io_leave, io_done
-cleanup_table_io_work_loop:
-	.quad io_work_loop, io_work_done
 
 cleanup_critical:
 	clc 8(8,%r12),BASED(cleanup_table_system_call)
@@ -927,11 +907,6 @@ cleanup_critical:
 	jl 0f
 	clc 8(8,%r12),BASED(cleanup_table_sysc_leave+8)
 	jl cleanup_sysc_leave
-0:
-	clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop)
-	jl 0f
-	clc 8(8,%r12),BASED(cleanup_table_sysc_work_loop+8)
-	jl cleanup_sysc_return
 0:
 	clc 8(8,%r12),BASED(cleanup_table_io_return)
 	jl 0f
@@ -942,11 +917,6 @@ cleanup_critical:
 	jl 0f
 	clc 8(8,%r12),BASED(cleanup_table_io_leave+8)
 	jl cleanup_io_leave
-0:
-	clc 8(8,%r12),BASED(cleanup_table_io_work_loop)
-	jl 0f
-	clc 8(8,%r12),BASED(cleanup_table_io_work_loop+8)
-	jl cleanup_io_work_loop
 0:
 	br %r14
 
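
For context on the deletions in the two cleanup_critical hunks above and on the cleanup_io_work_loop routine removed in the next hunk: each cleanup_table_* entry is a pair of label addresses, and the clc/jl pairs in cleanup_critical perform a range check on the interrupted instruction address; on a hit, the matching cleanup routine rewrites the return PSW so execution resumes at a safe restart point (the deleted cleanup_io_work_loop, for instance, pointed it back at io_work_loop). With the work loops now ending in j sysc_return / j io_return, their dedicated ranges and fix-ups are dropped. A minimal C sketch of the lookup itself, with made-up addresses and names:

#include <stdio.h>

/* Hypothetical stand-ins for the label pairs held in the cleanup tables. */
struct cleanup_range {
	unsigned long start, end;	/* [start, end) of one critical section */
	const char *fixup;		/* cleanup routine taken on a hit */
};

/* cleanup_critical as a linear range lookup over the remaining tables. */
static const char *cleanup_critical(unsigned long addr,
				    const struct cleanup_range *tab, int n)
{
	for (int i = 0; i < n; i++)
		if (addr >= tab[i].start && addr < tab[i].end)
			return tab[i].fixup;
	return NULL;			/* br %r14: nothing to clean up */
}

int main(void)
{
	const struct cleanup_range tab[] = {
		{ 0x100, 0x140, "cleanup_sysc_return" },
		{ 0x140, 0x180, "cleanup_sysc_leave"  },
		{ 0x200, 0x240, "cleanup_io_return"   },
		{ 0x240, 0x280, "cleanup_io_leave"    },
	};
	const char *fix = cleanup_critical(0x150, tab, 4);

	printf("%s\n", fix ? fix : "no cleanup needed");
	return 0;
}
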
@@ -1025,12 +995,6 @@ cleanup_io_return:
 	la %r12,__LC_RETURN_PSW
 	br %r14
 
-cleanup_io_work_loop:
-	mvc __LC_RETURN_PSW(8),0(%r12)
-	mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_work_loop)
-	la %r12,__LC_RETURN_PSW
-	br %r14
-
 cleanup_io_leave:
 	clc 8(8,%r12),BASED(cleanup_io_leave_insn)
 	je 3f