author    Martin Schwidefsky <schwidefsky@de.ibm.com>              2010-05-17 10:00:01 +0200
committer Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>  2010-05-17 10:00:15 +0200
commit    43d399d2ab7e96cb8d952d0ba4e9131587b7c8b9 (patch)
tree      3b5c651e8cc1cdbde50a846ace4500aebcfe5ea2 /arch/s390/kernel/entry.S
parent    94038a99119c171aea27608f81c7ba359de98c4e (diff)
[S390] cleanup sysc_work and io_work code
Cleanup the #ifdef mess at io_work in entry[64].S and streamline the TIF work code of the system call and io exit path.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
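For readers who do not follow s390 assembly, here is a rough, self-contained C model of the streamlined system-call exit path described above: check once whether we return to user space, then handle one pending TIF work bit per pass of sysc_work_loop and fall through to sysc_return once no bit is left. Only the _TIF_* names come from the kernel (their bit positions are arbitrary here); work_loop(), the stub handlers and main() are hypothetical and merely stand in for the assembly in the diff below.

/*
 * Hypothetical stand-alone model of sysc_work/sysc_work_loop after this
 * patch.  The stubs only print the assembly label they stand for.
 */
#include <stdio.h>

#define _TIF_MCCK_PENDING   (1u << 0)   /* bit positions arbitrary here */
#define _TIF_NEED_RESCHED   (1u << 1)
#define _TIF_SIGPENDING     (1u << 2)
#define _TIF_NOTIFY_RESUME  (1u << 3)

static void handle_mcck(void)   { puts("sysc_mcck_pending"); }
static void reschedule(void)    { puts("sysc_reschedule"); }
static void handle_signal(void) { puts("sysc_sigpending"); }
static void notify_resume(void) { puts("sysc_notify_resume"); }

static void work_loop(unsigned int *flags, int returning_to_user)
{
    /* sysc_work: TIF work is only done when returning to user space */
    if (!returning_to_user)
        return;                                 /* -> sysc_restore */

    /* sysc_work_loop: one work bit per pass; in the assembly each handler
     * clears its own bit and branches back to the top of the loop */
    while (*flags) {
        if (*flags & _TIF_MCCK_PENDING) {
            *flags &= ~_TIF_MCCK_PENDING;  handle_mcck();
        } else if (*flags & _TIF_NEED_RESCHED) {
            *flags &= ~_TIF_NEED_RESCHED;  reschedule();
        } else if (*flags & _TIF_SIGPENDING) {
            *flags &= ~_TIF_SIGPENDING;    handle_signal();
        } else if (*flags & _TIF_NOTIFY_RESUME) {
            *flags &= ~_TIF_NOTIFY_RESUME; notify_resume();
        } else {
            break;                              /* bit not modelled here */
        }
    }
    /* no work bit left -> sysc_return (critical section cleanup aware) */
}

int main(void)
{
    unsigned int tif_flags = _TIF_NEED_RESCHED | _TIF_SIGPENDING;

    work_loop(&tif_flags, 1);                   /* returning to user space */
    return 0;
}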
Diffstat (limited to 'arch/s390/kernel/entry.S')
-rw-r--r--  arch/s390/kernel/entry.S | 75
1 file changed, 28 insertions(+), 47 deletions(-)
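The io_work hunk further down replaces the old #ifdef maze with one decision: on return to user space, switch to the kernel stack and run the _TIF_WORK_INT loop; on return to kernel code with CONFIG_PREEMPT enabled, call preempt_schedule_irq as long as the preemption counter is zero and _TIF_NEED_RESCHED stays set. The sketch below models only that decision, under the same caveats as above: the stubs, the flag word and the compile-time switch are illustrative, not kernel code.

/*
 * Hypothetical stand-alone model of the reworked io_work decision; the
 * real logic is the io_work hunk in the diff below.
 */
#include <stdio.h>

#define CONFIG_PREEMPT 1
#define _TIF_NEED_RESCHED (1u << 1)     /* bit position arbitrary here */

static unsigned int tif_flags = _TIF_NEED_RESCHED;

static void io_work_user(void)          /* stub: stack switch + TIF work loop */
{
    puts("io_work_user -> io_work_loop -> io_return");
}

static void preempt_schedule_irq(void)  /* stub for the real kernel function */
{
    puts("preempt_schedule_irq");
    tif_flags &= ~_TIF_NEED_RESCHED;    /* pretend the reschedule happened */
}

static void io_work(int returning_to_user, unsigned int preempt_count)
{
    if (returning_to_user) {            /* -> io_work_user */
        io_work_user();
        return;
    }
#if CONFIG_PREEMPT
    if (preempt_count == 0) {           /* preemption not disabled */
        /* io_resume_loop: re-check _TIF_NEED_RESCHED after every call */
        while (tif_flags & _TIF_NEED_RESCHED)
            preempt_schedule_irq();
    }
#endif
    /* -> io_restore */
}

int main(void)
{
    io_work(0, 0);                      /* kernel return, preemption enabled */
    return 0;
}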
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 6af7045280a8..ffebfb64b913 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -301,31 +301,29 @@ sysc_restore_trace_psw:
#endif
#
-# recheck if there is more work to do
-#
-sysc_work_loop:
- tm __TI_flags+3(%r9),_TIF_WORK_SVC
- bz BASED(sysc_restore) # there is no work to do
-#
-# One of the work bits is on. Find out which one.
+# There is work to do, but first we need to check if we return to userspace.
#
sysc_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
bno BASED(sysc_restore)
+
+#
+# One of the work bits is on. Find out which one.
+#
+sysc_work_loop:
tm __TI_flags+3(%r9),_TIF_MCCK_PENDING
bo BASED(sysc_mcck_pending)
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
bo BASED(sysc_reschedule)
tm __TI_flags+3(%r9),_TIF_SIGPENDING
- bnz BASED(sysc_sigpending)
+ bo BASED(sysc_sigpending)
tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
- bnz BASED(sysc_notify_resume)
+ bo BASED(sysc_notify_resume)
tm __TI_flags+3(%r9),_TIF_RESTART_SVC
bo BASED(sysc_restart)
tm __TI_flags+3(%r9),_TIF_SINGLE_STEP
bo BASED(sysc_singlestep)
- b BASED(sysc_restore)
-sysc_work_done:
+ b BASED(sysc_return) # beware of critical section cleanup
#
# _TIF_NEED_RESCHED is set, call schedule
@@ -386,7 +384,7 @@ sysc_singlestep:
mvi SP_SVCNR+1(%r15),0xff
la %r2,SP_PTREGS(%r15) # address of register-save area
l %r1,BASED(.Lhandle_per) # load adr. of per handler
- la %r14,BASED(sysc_return) # load adr. of system return
+ la %r14,BASED(sysc_work_loop) # load adr. of system return
br %r1 # branch to do_single_step
#
@@ -636,30 +634,36 @@ io_restore_trace_psw:
#endif
#
-# switch to kernel stack, then check the TIF bits
+# There is work todo, find out in which context we have been interrupted:
+# 1) if we return to user space we can do all _TIF_WORK_INT work
+# 2) if we return to kernel code and preemptive scheduling is enabled check
+# the preemption counter and if it is zero call preempt_schedule_irq
+# Before any work can be done, a switch to the kernel stack is required.
#
io_work:
tm SP_PSW+1(%r15),0x01 # returning to user ?
-#ifndef CONFIG_PREEMPT
- bno BASED(io_restore) # no-> skip resched & signal
-#else
- bnz BASED(io_work_user) # no -> check for preemptive scheduling
+ bo BASED(io_work_user) # yes -> do resched & signal
+#ifdef CONFIG_PREEMPT
# check for preemptive scheduling
icm %r0,15,__TI_precount(%r9)
bnz BASED(io_restore) # preemption disabled
+ # switch to kernel stack
l %r1,SP_R15(%r15)
s %r1,BASED(.Lc_spsize)
mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
lr %r15,%r1
io_resume_loop:
- tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
- bno BASED(io_restore)
l %r1,BASED(.Lpreempt_schedule_irq)
la %r14,BASED(io_resume_loop)
- br %r1 # call schedule
+ tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
+ bor %r1 # call preempt_schedule_irq
#endif
+ b BASED(io_restore)
+#
+# Need to do work before returning to userspace, switch to kernel stack
+#
io_work_user:
l %r1,__LC_KERNEL_STACK
s %r1,BASED(.Lc_spsize)
@@ -668,7 +672,7 @@ io_work_user:
lr %r15,%r1
#
# One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NOTIFY_RESUME, _TIF_NEED_RESCHED
# and _TIF_MCCK_PENDING
#
io_work_loop:
@@ -677,11 +681,10 @@ io_work_loop:
tm __TI_flags+3(%r9),_TIF_NEED_RESCHED
bo BASED(io_reschedule)
tm __TI_flags+3(%r9),_TIF_SIGPENDING
- bnz BASED(io_sigpending)
+ bo BASED(io_sigpending)
tm __TI_flags+3(%r9),_TIF_NOTIFY_RESUME
- bnz BASED(io_notify_resume)
- b BASED(io_restore)
-io_work_done:
+ bo BASED(io_notify_resume)
+ b BASED(io_return) # beware of critical section cleanup
#
# _TIF_MCCK_PENDING is set, call handler
@@ -701,8 +704,6 @@ io_reschedule:
basr %r14,%r1 # call scheduler
stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts
TRACE_IRQS_OFF
- tm __TI_flags+3(%r9),_TIF_WORK_INT
- bz BASED(io_restore) # there is no work to do
b BASED(io_work_loop)
#
@@ -921,14 +922,10 @@ cleanup_table_sysc_return:
.long sysc_return + 0x80000000, sysc_leave + 0x80000000
cleanup_table_sysc_leave:
.long sysc_leave + 0x80000000, sysc_done + 0x80000000
-cleanup_table_sysc_work_loop:
- .long sysc_work_loop + 0x80000000, sysc_work_done + 0x80000000
cleanup_table_io_return:
.long io_return + 0x80000000, io_leave + 0x80000000
cleanup_table_io_leave:
.long io_leave + 0x80000000, io_done + 0x80000000
-cleanup_table_io_work_loop:
- .long io_work_loop + 0x80000000, io_work_done + 0x80000000
cleanup_critical:
clc 4(4,%r12),BASED(cleanup_table_system_call)
@@ -946,11 +943,6 @@ cleanup_critical:
clc 4(4,%r12),BASED(cleanup_table_sysc_leave+4)
bl BASED(cleanup_sysc_leave)
0:
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_sysc_work_loop+4)
- bl BASED(cleanup_sysc_return)
-0:
clc 4(4,%r12),BASED(cleanup_table_io_return)
bl BASED(0f)
clc 4(4,%r12),BASED(cleanup_table_io_return+4)
@@ -961,11 +953,6 @@ cleanup_critical:
clc 4(4,%r12),BASED(cleanup_table_io_leave+4)
bl BASED(cleanup_io_leave)
0:
- clc 4(4,%r12),BASED(cleanup_table_io_work_loop)
- bl BASED(0f)
- clc 4(4,%r12),BASED(cleanup_table_io_work_loop+4)
- bl BASED(cleanup_io_work_loop)
-0:
br %r14
cleanup_system_call:
@@ -1043,12 +1030,6 @@ cleanup_io_return:
la %r12,__LC_RETURN_PSW
br %r14
-cleanup_io_work_loop:
- mvc __LC_RETURN_PSW(4),0(%r12)
- mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_work_loop)
- la %r12,__LC_RETURN_PSW
- br %r14
-
cleanup_io_leave:
clc 4(4,%r12),BASED(cleanup_io_leave_insn)
be BASED(2f)