@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
 }
 #endif /* defined(CHECK_STACK) */
 
-#ifdef CONFIG_ALTIVEC
-int
-dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+/*
+ * Make sure the floating-point register state in
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
 {
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+	if (tsk->thread.regs) {
+		/*
+		 * We need to disable preemption here because if we didn't,
+		 * another process could get scheduled after the regs->msr
+		 * test but before we have finished saving the FP registers
+		 * to the thread_struct.  That process could take over the
+		 * FPU, and then when we get scheduled again we would store
+		 * bogus values for the remaining FP registers.
+		 */
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+			/*
+			 * This should only ever be called for current or
+			 * for a stopped child process.  Since we save away
+			 * the FP register state on context switch on SMP,
+			 * there is something wrong if a stopped child appears
+			 * to still have its FP state in the CPU registers.
+			 */
+			BUG_ON(tsk != current);
+#endif
+			giveup_fpu(current);
+		}
+		preempt_enable();
+	}
+}
+
+void enable_kernel_fp(void)
+{
+	WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+		giveup_fpu(current);
+	else
+		giveup_fpu(NULL);	/* just enables FP for kernel */
+#else
+	giveup_fpu(last_task_used_math);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_fp);
+
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+	preempt_disable();
+	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
+		giveup_fpu(tsk);
+	preempt_enable();
+	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
 	return 1;
 }
 
-void
-enable_kernel_altivec(void)
+#ifdef CONFIG_ALTIVEC
+void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());
 
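For reference, the intended calling pattern for the new flush_fp_to_thread() helper is: flush the live FP state into the thread_struct first, then read the saved copy (for example from a ptrace or coredump path). A minimal sketch only, assuming a hypothetical caller named get_fpregs_sketch(); flush_fp_to_thread(), tsk->thread.fpr and elf_fpregset_t are the only names taken from the patch itself:

	/* Illustrative only -- not part of this patch. */
	static int get_fpregs_sketch(struct task_struct *tsk, elf_fpregset_t *fpregs)
	{
		/* If tsk still owns the FPU, this saves the registers into
		 * tsk->thread.fpr[] (with preemption disabled) before we
		 * copy them out below. */
		flush_fp_to_thread(tsk);
		memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
		return 0;
	}
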
@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
 #endif /* __SMP __ */
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
-#endif /* CONFIG_ALTIVEC */
 
-#ifdef CONFIG_SPE
-int
-dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+/*
+ * Make sure the VMX/Altivec register state in
+ * the thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
 {
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
-	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+			BUG_ON(tsk != current);
+#endif
+			giveup_altivec(current);
+		}
+		preempt_enable();
+	}
+}
+
+int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+	if (regs->msr & MSR_VEC)
+		giveup_altivec(current);
+	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
 	return 1;
 }
+#endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
 void
 enable_kernel_spe(void)
 {
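enable_kernel_altivec(), like enable_kernel_fp() above, contains WARN_ON(preemptible()), so in-kernel users of the vector unit are expected to disable preemption around both the call and their vector code; once preemption is re-enabled, the VMX registers may be taken over by another task. A minimal sketch of that usage, under a hypothetical function name vmx_region_sketch(); only enable_kernel_altivec() itself is from the patch:

	/* Illustrative only -- not part of this patch. */
	static void vmx_region_sketch(void)
	{
		preempt_disable();
		enable_kernel_altivec();
		/* ... use the VMX/Altivec registers here ... */
		preempt_enable();
	}
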
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
 #endif /* __SMP __ */
 }
 EXPORT_SYMBOL(enable_kernel_spe);
-#endif /* CONFIG_SPE */
 
-void
-enable_kernel_fp(void)
+void flush_spe_to_thread(struct task_struct *tsk)
 {
-	WARN_ON(preemptible());
-
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_SPE) {
 #ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-		giveup_fpu(current);
-	else
-		giveup_fpu(NULL);	/* just enables FP for kernel */
-#else
-	giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
+			BUG_ON(tsk != current);
+#endif
+			giveup_spe(current);
+		}
+		preempt_enable();
+	}
 }
-EXPORT_SYMBOL(enable_kernel_fp);
 
-int
-dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 {
-	preempt_disable();
-	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
-		giveup_fpu(tsk);
-	preempt_enable();
-	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
+	if (regs->msr & MSR_SPE)
+		giveup_spe(current);
+	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
+	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
 	return 1;
 }
+#endif /* CONFIG_SPE */
 
 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
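The constant 35 in dump_spe() counts the SPE state in 32-bit words; the breakdown below only spells out the arithmetic in its comment and assumes, as the single memcpy() does, that the three fields are laid out contiguously in thread_struct:

	/*
	 * u32 evr[32]  -> 32 words (upper halves of the SPE-extended GPRs)
	 * u64 acc      ->  2 words (the accumulator)
	 * u32 spefscr  ->  1 word  (SPE status/control register)
	 *                 ---------
	 *                  35 words copied by dump_spe()
	 */
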