|
@@ -1014,14 +1014,21 @@ intr_restore:
|
|
|
nop
|
|
|
nop
|
|
|
|
|
|
+#ifndef CONFIG_PREEMPT
|
|
|
+# define intr_do_preempt intr_restore
|
|
|
+#endif /* !CONFIG_PREEMPT */
|
|
|
+
|
|
|
.import schedule,code
|
|
|
intr_do_resched:
|
|
|
- /* Only do reschedule if we are returning to user space */
|
|
|
+ /* Only call schedule on return to userspace. If we're returning
|
|
|
+ * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
|
|
|
+ * we jump back to intr_restore.
|
|
|
+ */
|
|
|
LDREG PT_IASQ0(%r16), %r20
|
|
|
- CMPIB= 0,%r20,intr_restore /* backward */
|
|
|
+ CMPIB= 0, %r20, intr_do_preempt
|
|
|
nop
|
|
|
LDREG PT_IASQ1(%r16), %r20
|
|
|
- CMPIB= 0,%r20,intr_restore /* backward */
|
|
|
+ CMPIB= 0, %r20, intr_do_preempt
|
|
|
nop
|
|
|
|
|
|
#ifdef CONFIG_64BIT
|
|
@@ -1037,6 +1044,32 @@ intr_do_resched:
|
|
|
#endif
|
|
|
ldo R%intr_check_sig(%r2), %r2
|
|
|
|
|
|
+ /* preempt the current task on returning to kernel
|
|
|
+ * mode from an interrupt, iff need_resched is set,
|
|
|
+ * and preempt_count is 0. otherwise, we continue on
|
|
|
+ * our merry way back to the current running task.
|
|
|
+ */
|
|
|
+#ifdef CONFIG_PREEMPT
|
|
|
+ .import preempt_schedule_irq,code
|
|
|
+intr_do_preempt:
|
|
|
+ rsm PSW_SM_I, %r0 /* disable interrupts */
|
|
|
+
|
|
|
+ /* current_thread_info()->preempt_count */
|
|
|
+ mfctl %cr30, %r1
|
|
|
+ LDREG TI_PRE_COUNT(%r1), %r19
|
|
|
+ CMPIB<> 0, %r19, intr_restore /* if preempt_count > 0 */
|
|
|
+ nop /* prev insn branched backwards */
|
|
|
+
|
|
|
+ /* check if we interrupted a critical path */
|
|
|
+ LDREG PT_PSW(%r16), %r20
|
|
|
+ bb,<,n %r20, 31 - PSW_SM_I, intr_restore
|
|
|
+ nop
|
|
|
+
|
|
|
+ BL preempt_schedule_irq, %r2
|
|
|
+ nop
|
|
|
+
|
|
|
+	b,n	intr_restore /* ssm PSW_SM_I done by intr_restore */
|
|
|
+#endif /* CONFIG_PREEMPT */
|
|
|
|
|
|
.import do_signal,code
|
|
|
intr_do_signal:
|